From 33c030b3037da642e0d71cf56c4d8d27f61e620b Mon Sep 17 00:00:00 2001 From: z00512249 Date: Mon, 22 Mar 2021 21:06:31 +0800 Subject: [PATCH] add mnist stm32f746 example --- .../example/mnist_stm32f746/Core/Inc/main.h | 71 + .../example/mnist_stm32f746/Core/Src/main.c | 214 + .../micro/example/mnist_stm32f746/README.md | 174 + .../mnist_stm32f746/mnist/CMakeLists.txt | 59 + .../mnist/benchmark/CMakeLists.txt | 65 + .../mnist/benchmark/benchmark.cc | 136 + .../mnist/benchmark/benchmark.cmake | 7 + .../mnist/benchmark/load_input.c | 95 + .../mnist/benchmark/load_input.h | 36 + .../mnist_stm32f746/mnist/include/api/cell.h | 133 + .../mnist/include/api/context.h | 185 + .../mnist/include/api/data_type.h | 43 + .../mnist/include/api/dual_abi_helper.h | 164 + .../mnist_stm32f746/mnist/include/api/graph.h | 44 + .../mnist/include/api/lite_context.h | 71 + .../mnist_stm32f746/mnist/include/api/model.h | 62 + .../mnist/include/api/ops/ops.h | 48 + .../mnist/include/api/serialization.h | 47 + .../mnist/include/api/status.h | 164 + .../mnist_stm32f746/mnist/include/api/types.h | 122 + .../mnist_stm32f746/mnist/include/context.h | 66 + .../mnist_stm32f746/mnist/include/errorcode.h | 74 + .../mnist/include/ir/dtype/type_id.h | 95 + .../mnist/include/lite_session.h | 125 + .../mnist/include/lite_types.h | 36 + .../mnist/include/lite_utils.h | 412 + .../mnist_stm32f746/mnist/include/model.h | 66 + .../mnist_stm32f746/mnist/include/ms_tensor.h | 97 + .../mnist_stm32f746/mnist/include/version.h | 38 + .../CMSIS/Core/Include/cachel1_armv7.h | 411 + .../include/CMSIS/Core/Include/cmsis_armcc.h | 885 ++ .../CMSIS/Core/Include/cmsis_armclang.h | 1467 +++ .../CMSIS/Core/Include/cmsis_armclang_ltm.h | 1893 ++++ .../CMSIS/Core/Include/cmsis_compiler.h | 283 + .../include/CMSIS/Core/Include/cmsis_gcc.h | 2177 ++++ .../include/CMSIS/Core/Include/cmsis_iccarm.h | 968 ++ .../CMSIS/Core/Include/cmsis_version.h | 39 + .../CMSIS/Core/Include/core_armv81mml.h | 4191 ++++++++ .../CMSIS/Core/Include/core_armv8mbl.h | 2222 ++++ .../CMSIS/Core/Include/core_armv8mml.h | 3196 ++++++ .../include/CMSIS/Core/Include/core_cm0.h | 952 ++ .../include/CMSIS/Core/Include/core_cm0plus.h | 1087 ++ .../include/CMSIS/Core/Include/core_cm1.h | 979 ++ .../include/CMSIS/Core/Include/core_cm23.h | 2297 +++++ .../include/CMSIS/Core/Include/core_cm3.h | 1943 ++++ .../include/CMSIS/Core/Include/core_cm33.h | 3264 ++++++ .../include/CMSIS/Core/Include/core_cm35p.h | 3264 ++++++ .../include/CMSIS/Core/Include/core_cm4.h | 2129 ++++ .../include/CMSIS/Core/Include/core_cm55.h | 4215 ++++++++ .../include/CMSIS/Core/Include/core_cm7.h | 2362 +++++ .../include/CMSIS/Core/Include/core_sc000.h | 1030 ++ .../include/CMSIS/Core/Include/core_sc300.h | 1917 ++++ .../include/CMSIS/Core/Include/mpu_armv7.h | 275 + .../include/CMSIS/Core/Include/mpu_armv8.h | 352 + .../include/CMSIS/Core/Include/pmu_armv8.h | 337 + .../include/CMSIS/Core/Include/tz_context.h | 70 + .../CMSIS/DSP/Include/arm_common_tables.h | 517 + .../CMSIS/DSP/Include/arm_const_structs.h | 76 + .../CMSIS/DSP/Include/arm_helium_utils.h | 348 + .../include/CMSIS/DSP/Include/arm_math.h | 8970 +++++++++++++++++ .../CMSIS/DSP/Include/arm_mve_tables.h | 235 + .../include/CMSIS/DSP/Include/arm_vec_math.h | 372 + .../include/CMSIS/NN/Include/arm_nn_tables.h | 56 + .../CMSIS/NN/Include/arm_nnfunctions.h | 2184 ++++ .../CMSIS/NN/Include/arm_nnsupportfunctions.h | 905 ++ .../operator_library/include/nnacl/adder.h | 32 + .../include/nnacl/arg_min_max_parameter.h | 54 + .../include/nnacl/arithmetic.h | 46 
+ .../include/nnacl/arithmetic_self_parameter.h | 30 + .../include/nnacl/assembly_global.h | 33 + .../include/nnacl/base/arithmetic_base.h | 34 + .../include/nnacl/base/batch_to_space_base.h | 33 + .../include/nnacl/base/cast_base.h | 104 + .../include/nnacl/base/concat_base.h | 32 + .../include/nnacl/base/conv1x1_base.h | 32 + .../include/nnacl/base/depth_to_space_base.h | 30 + .../include/nnacl/base/fill_base.h | 32 + .../include/nnacl/base/gather_base.h | 33 + .../nnacl/base/minimal_filtering_generator.h | 63 + .../include/nnacl/base/slice_base.h | 34 + .../include/nnacl/base/space_to_depth_base.h | 30 + .../include/nnacl/base/split_base.h | 32 + .../include/nnacl/base/stack_base.h | 30 + .../include/nnacl/base/tile_base.h | 53 + .../include/nnacl/base/unstack_base.h | 32 + .../include/nnacl/base/zeroslike_base.h | 34 + .../include/nnacl/batch_to_space.h | 31 + .../include/nnacl/batchnorm_parameter.h | 32 + .../include/nnacl/broadcast_to_parameter.h | 34 + .../include/nnacl/cast_parameter.h | 27 + .../include/nnacl/common_func.h | 77 + .../include/nnacl/concat_parameter.h | 35 + .../nnacl/constant_of_shape_parameter.h | 31 + .../include/nnacl/conv_parameter.h | 131 + .../include/nnacl/crop_parameter.h | 36 + .../include/nnacl/depth_to_space_parameter.h | 35 + .../nnacl/detection_post_process_parameter.h | 48 + .../include/nnacl/errorcode.h | 62 + .../include/nnacl/fill_parameter.h | 28 + .../include/nnacl/fp32/activation_fp32.h | 49 + .../include/nnacl/fp32/add_fp32.h | 45 + .../include/nnacl/fp32/adder_fp32.h | 47 + .../include/nnacl/fp32/arg_min_max_fp32.h | 31 + .../nnacl/fp32/arithmetic_compare_fp32.h | 50 + .../include/nnacl/fp32/arithmetic_fp32.h | 70 + .../include/nnacl/fp32/arithmetic_self_fp32.h | 62 + .../include/nnacl/fp32/batchnorm_fp32.h | 37 + .../include/nnacl/fp32/broadcast_to_fp32.h | 30 + .../include/nnacl/fp32/common_func_fp32.h | 105 + .../nnacl/fp32/constant_of_shape_fp32.h | 45 + .../include/nnacl/fp32/conv_common_fp32.h | 40 + .../include/nnacl/fp32/conv_depthwise_fp32.h | 91 + .../include/nnacl/fp32/conv_winograd_fp32.h | 44 + .../include/nnacl/fp32/crop_fp32.h | 34 + .../include/nnacl/fp32/deconv_fp32.h | 37 + .../include/nnacl/fp32/deconv_winograd_fp32.h | 43 + .../nnacl/fp32/detection_post_process_fp32.h | 59 + .../include/nnacl/fp32/div_fp32.h | 43 + .../include/nnacl/fp32/elu_fp32.h | 39 + .../nnacl/fp32/embedding_lookup_fp32.h | 43 + .../include/nnacl/fp32/exp_fp32.h | 100 + .../include/nnacl/fp32/gatherNd_fp32.h | 35 + .../include/nnacl/fp32/gru_fp32.h | 30 + .../include/nnacl/fp32/instance_norm_fp32.h | 32 + .../nnacl/fp32/invert_permutation_fp32.h | 27 + .../include/nnacl/fp32/l2_norm_fp32.h | 34 + .../include/nnacl/fp32/layer_norm_fp32.h | 32 + .../nnacl/fp32/local_response_norm_fp32.h | 40 + .../include/nnacl/fp32/lstm_fp32.h | 43 + .../include/nnacl/fp32/matmul_fp32.h | 83 + .../include/nnacl/fp32/mul_fp32.h | 49 + .../include/nnacl/fp32/one_hot_fp32.h | 47 + .../include/nnacl/fp32/pack_fp32.h | 70 + .../include/nnacl/fp32/pad_fp32.h | 41 + .../include/nnacl/fp32/pooling_fp32.h | 38 + .../include/nnacl/fp32/power_fp32.h | 32 + .../include/nnacl/fp32/prelu_fp32.h | 60 + .../include/nnacl/fp32/prior_box_fp32.h | 38 + .../include/nnacl/fp32/range_fp32.h | 50 + .../include/nnacl/fp32/rank_fp32.h | 32 + .../include/nnacl/fp32/reduce_fp32.h | 58 + .../include/nnacl/fp32/resize_fp32.h | 71 + .../include/nnacl/fp32/reverse_fp32.h | 41 + .../nnacl/fp32/reverse_sequence_fp32.h | 33 + .../include/nnacl/fp32/roi_pooling_fp32.h | 54 + 
.../include/nnacl/fp32/scale_fp32.h | 35 + .../include/nnacl/fp32/scatter_nd_fp32.h | 30 + .../include/nnacl/fp32/softmax_fp32.h | 31 + .../include/nnacl/fp32/space_to_batch_fp32.h | 49 + .../include/nnacl/fp32/sparse_to_dense_fp32.h | 30 + .../include/nnacl/fp32/splice_fp32.h | 31 + .../include/nnacl/fp32/squared_difference.h | 37 + .../include/nnacl/fp32/strided_slice_fp32.h | 33 + .../include/nnacl/fp32/sub_fp32.h | 43 + .../include/nnacl/fp32/topk_fp32.h | 47 + .../include/nnacl/fp32/transpose_fp32.h | 35 + .../include/nnacl/fp32/unique_fp32.h | 35 + .../include/nnacl/fp32/where_fp32.h | 31 + .../include/nnacl/fp32/winograd_transform.h | 43 + .../include/nnacl/fp32/winograd_utils.h | 316 + .../include/nnacl/gather_parameter.h | 28 + .../include/nnacl/gelu_parameter.h | 28 + .../include/nnacl/gru_parameter.h | 37 + .../include/nnacl/instance_norm_parameter.h | 32 + .../include/nnacl/int8/add_int8.h | 73 + .../include/nnacl/int8/arg_min_max_int8.h | 41 + .../include/nnacl/int8/arithmetic_int8.h | 51 + .../include/nnacl/int8/arithmetic_self_int8.h | 59 + .../include/nnacl/int8/batch_to_space_int8.h | 32 + .../include/nnacl/int8/batchnorm_int8.h | 34 + .../include/nnacl/int8/common_func_int8.h | 94 + .../include/nnacl/int8/concat_int8.h | 32 + .../include/nnacl/int8/conv1x1_int8.h | 45 + .../include/nnacl/int8/conv3x3_int8.h | 48 + .../include/nnacl/int8/conv_depthwise_int8.h | 49 + .../include/nnacl/int8/conv_int8.h | 44 + .../include/nnacl/int8/crop_int8.h | 34 + .../include/nnacl/int8/deconv_int8.h | 46 + .../include/nnacl/int8/depth_to_space_int8.h | 31 + .../include/nnacl/int8/div_int8.h | 33 + .../include/nnacl/int8/fixed_point.h | 74 + .../include/nnacl/int8/gatherNd_int8.h | 31 + .../include/nnacl/int8/gather_int8.h | 32 + .../include/nnacl/int8/hswish_int8.h | 43 + .../include/nnacl/int8/l2_norm_int8.h | 32 + .../include/nnacl/int8/layer_norm_int8.h | 34 + .../include/nnacl/int8/leaky_relu_int8.h | 31 + .../include/nnacl/int8/matmul_int8.h | 83 + .../include/nnacl/int8/mul_int8.h | 38 + .../include/nnacl/int8/pack_int8.h | 62 + .../include/nnacl/int8/pad_int8.h | 35 + .../include/nnacl/int8/pooling_int8.h | 44 + .../include/nnacl/int8/power_int8.h | 32 + .../nnacl/int8/quant_dtype_cast_int8.h | 42 + .../include/nnacl/int8/quantize.h | 219 + .../include/nnacl/int8/reduce_int8.h | 70 + .../include/nnacl/int8/relux_int8.h | 43 + .../include/nnacl/int8/reshape_int8.h | 32 + .../include/nnacl/int8/resize_int8.h | 50 + .../include/nnacl/int8/scale_int8.h | 35 + .../include/nnacl/int8/sigmoid_int8.h | 32 + .../include/nnacl/int8/slice_int8.h | 34 + .../include/nnacl/int8/softmax_int8.h | 35 + .../include/nnacl/int8/space_to_batch_int8.h | 32 + .../include/nnacl/int8/splice_int8.h | 30 + .../include/nnacl/int8/split_int8.h | 33 + .../include/nnacl/int8/squeeze_int8.h | 32 + .../include/nnacl/int8/sub_int8.h | 31 + .../include/nnacl/int8/tanh_int8.h | 43 + .../include/nnacl/int8/topk_int8.h | 36 + .../include/nnacl/int8/transpose_int8.h | 35 + .../include/nnacl/int8/unsqueeze_int8.h | 32 + .../nnacl/intrinsics/avx/common_utils.h | 44 + .../nnacl/intrinsics/ms_simd_instructions.h | 226 + .../include/nnacl/intrinsics/sse/sse_common.h | 56 + .../include/nnacl/l2_norm_parameter.h | 41 + .../include/nnacl/layer_norm_parameter.h | 50 + .../include/nnacl/lsh_projection_parameter.h | 35 + .../include/nnacl/lstm_parameter.h | 43 + .../include/nnacl/matmul_parameter.h | 79 + .../include/nnacl/mul_parameter.h | 40 + .../include/nnacl/nnacl_common.h | 60 + .../include/nnacl/nnacl_utils.h | 39 + 
.../nnacl/non_max_suppression_parameter.h | 28 + .../operator_library/include/nnacl/op_base.h | 107 + .../operator_library/include/nnacl/pack.h | 31 + .../include/nnacl/pad_parameter.h | 51 + .../include/nnacl/pooling_parameter.h | 57 + .../include/nnacl/power_parameter.h | 41 + .../include/nnacl/predict_parameter.h | 32 + .../include/nnacl/prelu_parameter.h | 32 + .../include/nnacl/prior_box_parameter.h | 40 + .../include/nnacl/random_parameter.h | 27 + .../include/nnacl/reduce_parameter.h | 34 + .../include/nnacl/reshape_parameter.h | 40 + .../include/nnacl/resize_parameter.h | 37 + .../nnacl/reverse_sequence_parameter.h | 45 + .../operator_library/include/nnacl/scale.h | 46 + .../include/nnacl/sigmoid_parameter.h | 41 + .../include/nnacl/skip_gram_parameter.h | 30 + .../include/nnacl/slice_parameter.h | 45 + .../include/nnacl/softmax_parameter.h | 35 + .../include/nnacl/space_to_depth_parameter.h | 26 + .../include/nnacl/sparse_to_dense_parameter.h | 31 + .../include/nnacl/splice_parameter.h | 29 + .../include/nnacl/split_parameter.h | 47 + .../include/nnacl/squeeze_parameter.h | 46 + .../include/nnacl/stack_parameter.h | 27 + .../include/nnacl/strided_slice_parameter.h | 43 + .../operator_library/include/nnacl/tensor_c.h | 29 + .../include/nnacl/tensorlist_parameter.h | 32 + .../include/nnacl/transpose.h | 40 + .../include/nnacl/unsqueeze_parameter.h | 48 + .../include/nnacl/unstack_parameter.h | 34 + .../include/nnacl/upsample_parameter.h | 29 + .../include/nnacl/where_parameter.h | 35 + .../include/wrapper/base/common_wrapper.h | 24 + .../detection_post_process_base_wrapper.h | 24 + .../wrapper/base/optimize_handler_wrapper.h | 41 + .../fp32/dequant_int8_to_fp32_wrapper.h | 48 + .../wrapper/fp32/matmul_fp32_wrapper.h | 33 + .../include/wrapper/int8/add_int8_wrapper.h | 49 + .../wrapper/int8/batchnorm_int8_wrapper.h | 32 + .../wrapper/int8/concat_int8_wrapper.h | 35 + .../wrapper/int8/conv1x1_init_int8_wrapper.h | 28 + .../wrapper/int8/conv1x1_run_int8_wrapper.h | 48 + .../wrapper/int8/conv_init_int8_wrapper.h | 26 + .../int8/convolution_depthwise_int8_wrapper.h | 35 + .../wrapper/int8/convolution_int8_wrapper.h | 41 + .../wrapper/int8/matmul_int8_wrapper.h | 36 + .../wrapper/int8/resize_int8_wrapper.h | 41 + .../include/wrapper/int8/slice_int8_wrapper.h | 31 + .../NN/ConvolutionFunctions/CMakeLists.txt | 19 + .../arm_convolve_1_x_n_s8.c | 193 + .../arm_convolve_1x1_HWC_q7_fast_nonsquare.c | 236 + .../arm_convolve_1x1_s8_fast.c | 185 + .../arm_convolve_HWC_q15_basic.c | 207 + .../arm_convolve_HWC_q15_fast.c | 255 + .../arm_convolve_HWC_q15_fast_nonsquare.c | 265 + .../arm_convolve_HWC_q7_RGB.c | 279 + .../arm_convolve_HWC_q7_basic.c | 231 + .../arm_convolve_HWC_q7_basic_nonsquare.c | 229 + .../arm_convolve_HWC_q7_fast.c | 408 + .../arm_convolve_HWC_q7_fast_nonsquare.c | 379 + .../NN/ConvolutionFunctions/arm_convolve_s8.c | 359 + .../arm_depthwise_conv_3x3_s8.c | 209 + .../arm_depthwise_conv_s8.c | 261 + .../arm_depthwise_conv_s8_opt.c | 440 + .../arm_depthwise_conv_u8_basic_ver1.c | 239 + .../arm_depthwise_separable_conv_HWC_q7.c | 418 + ...epthwise_separable_conv_HWC_q7_nonsquare.c | 413 + .../arm_nn_depthwise_conv_s8_core.c | 219 + .../arm_nn_mat_mult_kernel_q7_q15.c | 179 + .../arm_nn_mat_mult_kernel_q7_q15_reordered.c | 129 + .../arm_nn_mat_mult_kernel_s8_s16.c | 391 + .../arm_nn_mat_mult_kernel_s8_s16_reordered.c | 201 + .../ConvolutionFunctions/arm_nn_mat_mult_s8.c | 173 + .../NN/FullyConnectedFunctions/CMakeLists.txt | 19 + .../arm_fully_connected_mat_q7_vec_q15.c | 199 + 
.../arm_fully_connected_mat_q7_vec_q15_opt.c | 404 + .../arm_fully_connected_q15.c | 193 + .../arm_fully_connected_q15_opt.c | 332 + .../arm_fully_connected_q7.c | 198 + .../arm_fully_connected_q7_opt.c | 484 + .../arm_fully_connected_s8.c | 289 + .../NN/NNSupportFunctions/CMakeLists.txt | 19 + .../arm_nn_accumulate_q7_to_q15.c | 85 + .../NN/NNSupportFunctions/arm_nn_add_q7.c | 81 + .../arm_nn_depthwise_conv_nt_t_padded_s8.c | 169 + .../arm_nn_depthwise_conv_nt_t_s8.c | 171 + .../arm_nn_mat_mul_core_1x_s8.c | 91 + .../arm_nn_mat_mul_core_4x_s8.c | 118 + .../arm_nn_mat_mult_nt_t_s8.c | 572 ++ .../NN/NNSupportFunctions/arm_nn_mult_q15.c | 145 + .../NN/NNSupportFunctions/arm_nn_mult_q7.c | 118 + .../arm_nn_vec_mat_mult_t_s8.c | 356 + .../NN/NNSupportFunctions/arm_nntables.c | 297 + .../arm_q7_to_q15_no_shift.c | 122 + .../arm_q7_to_q15_reordered_with_offset.c | 100 + .../arm_q7_to_q15_with_offset.c | 117 + .../CMSIS/NN/PoolingFunctions/CMakeLists.txt | 19 + .../NN/PoolingFunctions/arm_avgpool_s8.c | 279 + .../NN/PoolingFunctions/arm_max_pool_s8.c | 106 + .../NN/PoolingFunctions/arm_max_pool_s8_opt.c | 335 + .../NN/PoolingFunctions/arm_pool_q7_HWC.c | 454 + .../kernels/nnacl/fp32/exp_fp32.c | 51 + .../kernels/nnacl/fp32/softmax_fp32.c | 146 + .../nnacl/int8/quant_dtype_cast_int8.c | 125 + .../mnist/operator_library/src/string.cc | 288 + .../mnist_stm32f746/mnist/src/CMakeLists.txt | 85 + .../mnist/src/mnist_input_data.h | 200 + .../example/mnist_stm32f746/mnist/src/net.c | 103 + .../mnist_stm32f746/mnist/src/net.cmake | 25 + .../example/mnist_stm32f746/mnist/src/net.h | 50 + .../mnist_stm32f746/mnist/src/session.cc | 146 + .../mnist_stm32f746/mnist/src/session.h | 71 + .../mnist_stm32f746/mnist/src/tensor.cc | 83 + .../mnist_stm32f746/mnist/src/tensor.h | 66 + .../mnist_stm32f746/mnist/src/weight.c | 219 + .../mnist_stm32f746/mnist/src/weight.h | 36 + 340 files changed, 84298 insertions(+) create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c create mode 100644 mindspore/lite/micro/example/mnist_stm32f746/README.md create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/model.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/ops/ops.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/serialization.h 
create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/status.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/types.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/context.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/errorcode.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ir/dtype/type_id.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_session.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_types.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_utils.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/model.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ms_tensor.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/include/version.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cachel1_armv7.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armcc.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang_ltm.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_compiler.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_gcc.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_iccarm.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_version.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv81mml.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mbl.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mml.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0plus.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm1.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm23.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm3.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm33.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm35p.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm4.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm55.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm7.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc000.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc300.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv7.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/pmu_armv8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/tz_context.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_common_tables.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_const_structs.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_helium_utils.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_math.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_mve_tables.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_vec_math.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nn_tables.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnfunctions.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnsupportfunctions.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/adder.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arg_min_max_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic_self_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/assembly_global.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/arithmetic_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/batch_to_space_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/cast_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/concat_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/conv1x1_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/depth_to_space_base.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/fill_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/gather_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/minimal_filtering_generator.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/slice_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/space_to_depth_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/split_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/stack_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/tile_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/unstack_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/zeroslike_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batch_to_space.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batchnorm_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/broadcast_to_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/cast_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/common_func.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/concat_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/constant_of_shape_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/conv_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/crop_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/depth_to_space_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/detection_post_process_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/errorcode.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fill_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/activation_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/add_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/adder_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arg_min_max_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_compare_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_fp32.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_self_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/batchnorm_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/broadcast_to_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/common_func_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/constant_of_shape_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_common_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_depthwise_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_winograd_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/crop_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_winograd_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/detection_post_process_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/div_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/elu_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/embedding_lookup_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/exp_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gatherNd_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gru_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/instance_norm_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/invert_permutation_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/l2_norm_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/layer_norm_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/local_response_norm_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/lstm_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/matmul_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/mul_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/one_hot_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pack_fp32.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pad_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pooling_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/power_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prelu_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prior_box_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/range_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/rank_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reduce_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/resize_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_sequence_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/roi_pooling_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scale_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scatter_nd_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/softmax_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/space_to_batch_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sparse_to_dense_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/splice_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/squared_difference.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/strided_slice_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sub_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/topk_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/transpose_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/unique_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/where_fp32.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_transform.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_utils.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gather_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gelu_parameter.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gru_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/instance_norm_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/add_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arg_min_max_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_self_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batch_to_space_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batchnorm_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/common_func_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/concat_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv1x1_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv3x3_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_depthwise_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/crop_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/deconv_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/depth_to_space_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/div_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/fixed_point.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gatherNd_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gather_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/hswish_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/l2_norm_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/layer_norm_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/leaky_relu_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/matmul_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/mul_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pack_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pad_int8.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pooling_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/power_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quant_dtype_cast_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quantize.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reduce_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/relux_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reshape_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/resize_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/scale_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sigmoid_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/slice_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/softmax_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/space_to_batch_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/splice_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/split_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/squeeze_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sub_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/tanh_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/topk_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/transpose_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/unsqueeze_int8.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/avx/common_utils.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/ms_simd_instructions.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/sse/sse_common.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/l2_norm_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/layer_norm_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lsh_projection_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lstm_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/matmul_parameter.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/mul_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_common.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_utils.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/non_max_suppression_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/op_base.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pack.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pad_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pooling_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/power_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/predict_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prelu_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prior_box_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/random_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reduce_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reshape_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/resize_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reverse_sequence_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/scale.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sigmoid_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/skip_gram_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/slice_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/softmax_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/space_to_depth_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sparse_to_dense_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/splice_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/split_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/squeeze_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/stack_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/strided_slice_parameter.h create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensor_c.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensorlist_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/transpose.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unsqueeze_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unstack_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/upsample_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/where_parameter.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/common_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/detection_post_process_base_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/optimize_handler_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/dequant_int8_to_fp32_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/matmul_fp32_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/add_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/batchnorm_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/concat_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_init_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_run_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv_init_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_depthwise_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/matmul_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/resize_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/slice_int8_wrapper.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1_x_n_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_s8.c create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15_opt.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7_opt.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_add_q7.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q15.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q7.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nntables.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_no_shift.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_with_offset.c create mode 100755 
mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_avgpool_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8_opt.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_pool_q7_HWC.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/exp_fp32.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/softmax_fp32.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/int8/quant_dtype_cast_int8.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/src/string.cc create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/CMakeLists.txt create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/mnist_input_data.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.cmake create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.cc create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.cc create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.h create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.c create mode 100755 mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.h diff --git a/mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h b/mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h new file mode 100755 index 00000000000..ea74fe99616 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/Core/Inc/main.h @@ -0,0 +1,71 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.h + * @brief : Header for main.c file. + * This file contains the common defines of the application. + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2021 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ + +/* Define to prevent recursive inclusion -------------------------------------*/ +#ifndef __MAIN_H +#define __MAIN_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Includes ------------------------------------------------------------------*/ +#include "stm32f7xx_hal.h" + +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Exported types ------------------------------------------------------------*/ +/* USER CODE BEGIN ET */ + +/* USER CODE END ET */ + +/* Exported constants --------------------------------------------------------*/ +/* USER CODE BEGIN EC */ + +/* USER CODE END EC */ + +/* Exported macro ------------------------------------------------------------*/ +/* USER CODE BEGIN EM */ + +/* USER CODE END EM */ + +/* Exported functions prototypes ---------------------------------------------*/ +void Error_Handler(void); + +/* USER CODE BEGIN EFP */ + +/* USER CODE END EFP */ + +/* Private defines -----------------------------------------------------------*/ +/* USER CODE BEGIN Private defines */ + +/* USER CODE END Private defines */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MAIN_H */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c b/mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c new file mode 100755 index 00000000000..dc619ddae5d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/Core/Src/main.c @@ -0,0 +1,214 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.c + * @brief : Main program body + ****************************************************************************** + * @attention + * + *
© Copyright (c) 2021 STMicroelectronics. + * All rights reserved.
+ * + * This software component is licensed by ST under BSD 3-Clause license, + * the "License"; You may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * opensource.org/licenses/BSD-3-Clause + * + ****************************************************************************** + */ +/* USER CODE END Header */ +/* Includes ------------------------------------------------------------------*/ +#include "main.h" +#include "SEGGER_RTT.h" +#include "include/errorcode.h" +#include "include/lite_session.h" +#include "include/ms_tensor.h" +#include "mnist_input_data.h" +// #include + +using namespace mindspore; +/* Private includes ----------------------------------------------------------*/ +/* USER CODE BEGIN Includes */ + +/* USER CODE END Includes */ + +/* Private typedef -----------------------------------------------------------*/ +/* USER CODE BEGIN PTD */ + +/* USER CODE END PTD */ + +/* Private define ------------------------------------------------------------*/ +/* USER CODE BEGIN PD */ +/* USER CODE END PD */ + +/* Private macro -------------------------------------------------------------*/ +/* USER CODE BEGIN PM */ + +/* USER CODE END PM */ + +/* Private variables ---------------------------------------------------------*/ + +/* USER CODE BEGIN PV */ + +/* USER CODE END PV */ + +/* Private function prototypes -----------------------------------------------*/ +void SystemClock_Config(void); +/* USER CODE BEGIN PFP */ + +/* USER CODE END PFP */ + +/* Private user code ---------------------------------------------------------*/ +/* USER CODE BEGIN 0 */ + +/* USER CODE END 0 */ + +/** + * @brief The application entry point. + * @retval int + */ +int main(void) { + /* USER CODE BEGIN 1 */ + + /* USER CODE END 1 */ + + /* MCU Configuration--------------------------------------------------------*/ + + /* Reset of all peripherals, Initializes the Flash interface and the Systick. 
*/ + HAL_Init(); + + /* USER CODE BEGIN Init */ + + /* USER CODE END Init */ + + /* Configure the system clock */ + SystemClock_Config(); + + /* USER CODE BEGIN SysInit */ + + /* USER CODE END SysInit */ + + /* Initialize all configured peripherals */ + /* USER CODE BEGIN 2 */ + + /* USER CODE END 2 */ + + /* Infinite loop */ + /* USER CODE BEGIN WHILE */ + // float inputs_binbuf[784] = {0}; + while (1) { + /* USER CODE END WHILE */ + SEGGER_RTT_printf(0, "***********mnist test start***********\n"); + const char *model_buffer = nullptr; + int model_size = 0; + session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr); + Vector inputs = session->GetInputs(); + size_t inputs_num = inputs.size(); + void *inputs_binbuf[inputs_num]; + int inputs_size[inputs_num]; + for (size_t i = 0; i < inputs_num; ++i) { + inputs_size[i] = inputs[i]->Size(); + } + // here mnist only have one input data,just hard code to it's array; + inputs_binbuf[0] = mnist_inputs_data; + for (size_t i = 0; i < inputs_num; ++i) { + void *input_data = inputs[i]->MutableData(); + memcpy(input_data, inputs_binbuf[i], inputs_size[i]); + } + int ret = session->RunGraph(); + if (ret != lite::RET_OK) { + return lite::RET_ERROR; + } + Vector outputs_name = session->GetOutputTensorNames(); + for (int i = 0; i < outputs_name.size(); ++i) { + tensor::MSTensor *output_tensor = session->GetOutputByTensorName(outputs_name[i]); + if (output_tensor == nullptr) { + return -1; + } + SEGGER_RTT_printf(0, "***********mnist test start5.2***********\n"); + float *casted_data = static_cast(output_tensor->MutableData()); + if (casted_data == nullptr) { + return -1; + } + SEGGER_RTT_printf(0, "***********mnist test start5.3***********\n"); + for (size_t j = 0; j < 10 && j < output_tensor->ElementsNum(); j++) { + SEGGER_RTT_printf(0, "output: [%d] is : [%d]/100\n", i, casted_data[i] * 100); + } + } + delete session; + SEGGER_RTT_printf(0, "***********mnist test end***********\n"); + /* USER CODE BEGIN 3 */ + } + /* USER CODE END 3 */ +} + +/** + * @brief System Clock Configuration + * @retval None + */ +void SystemClock_Config(void) { + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + + /** Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE3); + /** Initializes the RCC Oscillators according to the specified parameters + * in the RCC_OscInitTypeDef structure. + */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI; + RCC_OscInitStruct.HSIState = RCC_HSI_ON; + RCC_OscInitStruct.HSICalibrationValue = RCC_HSICALIBRATION_DEFAULT; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_NONE; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) { + Error_Handler(); + } + /** Initializes the CPU, AHB and APB buses clocks + */ + RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_SYSCLK + | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_HSI; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV1; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_0) != HAL_OK) { + Error_Handler(); + } +} + +/* USER CODE BEGIN 4 */ + +/* USER CODE END 4 */ + +/** + * @brief This function is executed in case of error occurrence. 
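+  * @note The generated implementation below disables interrupts and spins in an infinite loop; application-specific error reporting can be added between the USER CODE markers.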
+ * @retval None + */ +void Error_Handler(void) { + /* USER CODE BEGIN Error_Handler_Debug */ + /* User can add his own implementation to report the HAL error return state */ + __disable_irq(); + while (1) { + } + /* USER CODE END Error_Handler_Debug */ +} + +#ifdef USE_FULL_ASSERT +/** + * @brief Reports the name of the source file and the source line number + * where the assert_param error has occurred. + * @param file: pointer to the source file name + * @param line: assert_param error line source number + * @retval None + */ +void assert_failed(uint8_t *file, uint32_t line) { + /* USER CODE BEGIN 6 */ + /* User can add his own implementation to report the file name and line number, + ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */ + /* USER CODE END 6 */ +} +#endif /* USE_FULL_ASSERT */ + +/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/README.md b/mindspore/lite/micro/example/mnist_stm32f746/README.md new file mode 100644 index 00000000000..654af301df9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/README.md @@ -0,0 +1,174 @@ +# Arm Cortex-M编译部署 + + `Linux` `Cortex-M` `IOT` `C/C++` `全流程` `模型编译` `模型代码生成` `模型部署` `推理应用` `初级` `中级` `高级` + + + +- Arm Cortex-M编译部署 + - [STM32F746编译依赖](#STM32F746编译依赖) + - [STM32F746构建](#STM32F746构建) + - [STM32F746工程部署](#STM32F746工程部署) + - [更多详情](#更多详情) + - [Linux x86_64平台编译部署](#Linux x86_64平台编译部署) + - [Android平台编译部署](#STM32746平台编译部署) + + + +## Arm Cortex-M编译部署 + +本教程以在STM32F746单板上编译部署生成模型代码为例,演示了codegen编译模型在Cortex-M平台的使用。更多关于Arm Cortex-M的详情可参见其[官网](https://developer.arm.com/ip-products/processors/cortex-m)。 + +### STM32F746编译依赖 + +模型推理代码的编译部署需要在windows上安装[Jlink]((https://www.segger.com/))、[STM32CubeMX](https://www.st.com/content/st_com/en.html)、[gcc-arm-none-ebai](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm)等工具来进行交叉编译。 + +- [STM32CubeMX-Win](https://www.st.com/content/ccc/resource/technical/software/sw_development_suite/group0/0b/05/f0/25/c7/2b/42/9d/stm32cubemx_v6-1-1/files/stm32cubemx_v6-1-1.zip/jcr:content/translations/en.stm32cubemx_v6-1-1.zip) >= 6.0.1 + +- [gcc-arm-none-eabi](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm/downloads) >= 9-2019-q4-major-win32 + +- [JLink-windows](https://www.segger.com/downloads/jlink/) >= 6.56 +- [GCC](https://gcc.gnu.org/releases.html) >= 7.3.0 +- [CMake](https://cmake.org/download/) >= 3.18.3 + +### STM32F746构建与运行 + +首先使用codegen编译LeNet模型,生成对应的STM32F46推理代码。具体命令如下: + +```bash +./codegen --codePath=. --modelPath=LeNet.ms --moduleName=LeNet --target=ARM32M +``` + +#### 代码工程说明 + +```bash +├── LeNet +└── operator_library +``` + +##### 算子静态库目录说明 + +在编译此工程之前需要预先获取Cortex-M 平台对应的[算子库]()。 + +预置算子静态库的目录如下: + +```bash +├── operator_library # 对应平台算子库目录 + ├── include # 对应平台算子库头文件目录 + └── lib # 对应平台算子库静态库目录 +``` + +生成代码工程目录如下: + +```bash +├── LeNet # 生成代码的根目录 + ├── benchmark # 生成代码的benchmark目录 + ├── include # 模型推理代码对外暴露头文件目录 + └── src # 模型推理代码目录 +``` + +#### 代码工程编译 + +##### 环境测试 + +安装好交叉编译所需环境后,需要在windows环境中依次将其加入到环境变量中 + +```bash +gcc -v # 查看GCC版本 +arm-none-eabi-gdb -v # 查看交叉编译环境 +jlink -v # 查看jlink版本 +make -v # 查看make版本 +``` + +以上的命令均有成功返回值时,表明环境准备ok,可以继续进入下一步,否则先安装上述环境!!! + +##### 生成STM32F746单板初始化代码([详情示例代码]()) + +1. 启动 STM32CubeMX,新建project,选择单板STM32F746IG + +2. 成功以后,选择`Makefile` ,`generator code` + +3. 
在生成的工程目录下打开`cmd`,执行`make`,测试初始代码是否成功编译。 + + ```bash + # make成功结果 + arm-none-eabi-size build/test_stm32f746.elf + text data bss dec hex filename + 3660 20 1572 5252 1484 build/test_stm32f746.elf + arm-none-eabi-objcopy -O ihex build/test_stm32f746.elf build/test_stm32f746.hex + arm-none-eabi-objcopy -O binary -S build/test_stm32f746.elf build/test_stm32f746.bin + ``` + +##### 编译生成模型静态库 + +1. 拷贝mindspore团队提供的cortex-m7的算子静态库以及对应头文件到STM32CubeMX生成的工程目录中。 + +2. 拷贝codegen生成模型推理代码到 STM32CubeMX生成的代码工程目录中 + + ```bash + ├── .mxproject + └── build # 工程编译目录最终的elf文件存在于此 + └── Core + └── Drivers + └── LeNet # codegen生成的cortex-m7 模型推理代码 + └── Makefile # 组织工程makefile文件需要用户自己修改组织lenet && operator_library到工程目录中 + └── operator_library # mindspore团队提供的对应平台算子库 + └── startup_stm32f746xx.s + └── STM32F746IGKx_FLASH.ld + └── test_stm32f746.ioc + ``` + +3. 修改makefile文件,组织算子静态库以及模型推理代码 + + ```bash + # C includes + C_INCLUDES = \ + -ICore/Inc \ + -IDrivers/STM32F7xx_HAL_Driver/Inc \ + -IDrivers/STM32F7xx_HAL_Driver/Inc/Legacy \ + -IDrivers/CMSIS/Device/ST/STM32F7xx/Include \ + -Ioperator_library/include \ # 新增,指定算子库头文件目录 + -ILeNet/include \ # 新增,指定模型推理代码头文件 + -ILeNet/src # 新增,指定模型推理代码头文件 + # libraries + LIBS = -lc -lm -lnosys -lops # 修改,导入mindspore团队提供算子库 + LIBDIR = -Ioperator_library/lib/arm32m # 新增,指定算子库所在路径 + ``` + +4. 在工程目录的Core/Src的main.c编写模型调用代码,具体代码新增如下: + + ```cpp + ``` + +5. 在工程跟目中目录使用管理员权限打开`cmd` 执行 `make`进行编译 + + ```bash + make + ``` + +### STM32F746工程部署 + +使用jlink 将可执行文件拷贝到单板上并做推理 + +```bash +jlinkgdbserver # 启动jlinkgdbserver 选定target device为STM32F746IG +jlinkRTTViewer # 启动jlinkRTTViewer 选定target devices为STM32F746IG +arm-none-eabi-gdb # 启动arm-gcc gdb服务 +file build/target.elf # 打开调测文件 +target remote 127.0.0.1 # 连接jlink服务器 +monitor reset # 重置单板 +monitor halt # 挂起单板 +load # 加载可执行文件到单板 +c # 执行模型推理 +``` + +#### 执行结果 + +```bash +``` + +## 更多详情 + +### [Linux x86_64平台编译部署]() + +### [Android平台编译部署]() + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt new file mode 100755 index 00000000000..4f38570e27b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/CMakeLists.txt @@ -0,0 +1,59 @@ + + +cmake_minimum_required(VERSION 3.14) +project(benchmark) + +if(NOT DEFINED PKG_PATH) + message(FATAL_ERROR "PKG_PATH not set") +endif() + +get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + +set(HEADER_PATH ${PKG_PATH}/inference) + +option(MICRO_BUILD_ARM64 "build android arm64" OFF) +option(MICRO_BUILD_ARM32A "build android arm32" OFF) + +add_compile_definitions(NOT_USE_STL) + +if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_NEON) + add_compile_definitions(ENABLE_ARM) +endif() + +if(MICRO_BUILD_ARM64) + add_compile_definitions(ENABLE_ARM64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod") +endif() + +if(MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_ARM32) + add_definitions(-mfloat-abi=softfp -mfpu=neon) +endif() + +set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + message(STATUS "build benchmark with debug info") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") +else() + set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall 
-Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +endif() + +add_subdirectory(src) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/) +include_directories(${HEADER_PATH}) +set(SRC_FILES + benchmark/benchmark.cc + benchmark/load_input.c +) +add_executable(benchmark ${SRC_FILES}) +target_link_libraries(benchmark net -lm -pthread) + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt new file mode 100755 index 00000000000..be96a1343f8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/CMakeLists.txt @@ -0,0 +1,65 @@ + +cmake_minimum_required(VERSION 3.14) +project(benchmark) + +if(NOT DEFINED MODEL_LIB) + message(FATAL_ERROR "MODEL_LIB not set") +endif() + +if(NOT DEFINED HEADER_PATH) + message(FATAL_ERROR "HEADER_PATH not set") +endif() + +get_filename_component(MODEL_LIB ${MODEL_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) +get_filename_component(HEADER_PATH ${HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + +function(parse_lib_info lib_full_path lib_name lib_path) + string(FIND "${lib_full_path}" "/" POS REVERSE) + math(EXPR POS "${POS} + 1") + string(SUBSTRING ${lib_full_path} 0 ${POS} path) + set(${lib_path} ${path} PARENT_SCOPE) + string(SUBSTRING ${lib_full_path} "${POS}" "-1" name) + set(${lib_name} ${name} PARENT_SCOPE) +endfunction(parse_lib_info) + +parse_lib_info(${MODEL_LIB} MODEL_LIB_NAME MODEL_LIB_PATH) + +message("project name: ${MODEL_LIB_NAME}") + +option(MICRO_BUILD_ARM64 "build android arm64" OFF) +option(MICRO_BUILD_ARM32A "build android arm32" OFF) + +if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_NEON) + add_compile_definitions(ENABLE_ARM) +endif() + +if(MICRO_BUILD_ARM64) + add_compile_definitions(ENABLE_ARM64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod") +endif() + +if(MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_ARM32) + add_definitions(-mfloat-abi=softfp -mfpu=neon) +endif() + +set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + message(STATUS "build benchmark with debug info") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") +else() + set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +endif() +link_directories(${MODEL_LIB_PATH}) +include(benchmark.cmake) +add_executable(benchmark ${SRC_FILES}) +target_link_libraries(benchmark ${MODEL_LIB_NAME} -lm -pthread) + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc new file mode 100755 index 00000000000..df5ac0d7c99 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cc @@ -0,0 +1,136 @@ + + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include "include/lite_session.h" +#include "include/ms_tensor.h" +#include "include/errorcode.h" + +#include "load_input.h" + +using namespace mindspore; + +void usage() { + printf( + "-- mindspore benchmark params usage:\n" + "args[0]: executable file\n" + "args[1]: inputs binary file\n" + "args[2]: model weight binary file\n" + "args[3]: loop count for performance test\n" + "args[4]: runtime thread num\n" + "args[5]: runtime thread bind mode\n\n"); +} + +template +void PrintData(void *data, size_t data_number) { + if (data == nullptr) { + return; + } + auto casted_data = static_cast(data); + for (size_t i = 0; i < 10 && i < data_number; i++) { + std::cout << std::to_string(casted_data[i]) << ", "; + } + std::cout << std::endl; +} + +void TensorToString(tensor::MSTensor *tensor) { + std::cout << ", DataType: " << tensor->data_type(); + std::cout << ", Size: " << tensor->Size(); + std::cout << ", Shape:"; + for (auto &dim : tensor->shape()) { + std::cout << " " << dim; + } + std::cout << ", Data:" << std::endl; + switch (tensor->data_type()) { + case kNumberTypeFloat32: { + PrintData(tensor->MutableData(), tensor->ElementsNum()); + } break; + case kNumberTypeFloat16: { + PrintData(tensor->MutableData(), tensor->ElementsNum()); + } break; + case kNumberTypeInt32: { + PrintData(tensor->MutableData(), tensor->ElementsNum()); + } break; + case kNumberTypeInt16: { + PrintData(tensor->MutableData(), tensor->ElementsNum()); + } break; + case kNumberTypeInt8: { + PrintData(tensor->MutableData(), tensor->ElementsNum()); + } break; + case kNumberTypeUInt8: { + PrintData(tensor->MutableData(), tensor->ElementsNum()); + } break; + default: + std::cout << "Unsupported data type to print" << std::endl; + break; + } +} + +int main(int argc, const char **argv) { + if (argc < 2) { + std::cout << "input command is invalid\n" << std::endl; + usage(); + return lite::RET_ERROR; + } + std::cout << "start run benchmark" << std::endl; + + const char *model_buffer = nullptr; + int model_size = 0; + // read .bin file by ReadBinaryFile; + if (argc >= 3) { + model_buffer = static_cast(ReadInputData(argv[2], &model_size)); + } + session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr); + if (session == nullptr) { + std::cerr << "create lite session failed" << std::endl; + return lite::RET_ERROR; + } + + // set model inputs tensor data + Vector inputs = session->GetInputs(); + size_t inputs_num = inputs.size(); + void *inputs_binbuf[inputs_num]; + int inputs_size[inputs_num]; + for (size_t i = 0; i < inputs_num; ++i) { + inputs_size[i] = inputs[i]->Size(); + } + int ret = 
ReadInputsFile(const_cast(argv[1]), inputs_binbuf, inputs_size, inputs_num); + if (ret != lite::RET_OK) { + return lite::RET_ERROR; + } + for (size_t i = 0; i < inputs_num; ++i) { + void *input_data = inputs[i]->MutableData(); + memcpy(input_data, inputs_binbuf[i], inputs_size[i]); + } + + ret = session->RunGraph(); + if (ret != lite::RET_OK) { + return lite::RET_ERROR; + } + + std::cout << "run benchmark success" << std::endl; + delete session; + for (size_t i = 0; i < inputs_num; ++i) { + free(inputs_binbuf[i]); + } + return lite::RET_OK; +} + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake new file mode 100755 index 00000000000..92461e9a56a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/benchmark.cmake @@ -0,0 +1,7 @@ +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/) +include_directories(${HEADER_PATH}) +set(SRC_FILES + benchmark.cc + load_input.c +) diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c new file mode 100755 index 00000000000..f0baa78f7d4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.c @@ -0,0 +1,95 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "load_input.h" +#include +#include +#include + +void *ReadInputData(const char *real_input_path, int *size) { + if (real_input_path == NULL) { + return NULL; + } + if (strstr(real_input_path, ".bin") || strstr(real_input_path, ".net")) { + FILE *file; + file = fopen(real_input_path, "rb+"); + if (!file) { + printf("Can't find %s\n", real_input_path); + return NULL; + } + int curr_file_posi = ftell(file); + fseek(file, 0, SEEK_END); + *size = ftell(file); + unsigned char *buf = malloc((*size)); + (void)memset(buf, 0, (*size)); + fseek(file, curr_file_posi, SEEK_SET); + int read_size = (int)(fread(buf, 1, *size, file)); + if (read_size != (*size)) { + printf("read file failed, total file size: %d, read_size: %d\n", (*size), read_size); + fclose(file); + free(buf); + return NULL; + } + fclose(file); + return (void *)buf; + } else { + printf("input data file should be .bin , .net"); + return NULL; + } +} + +void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) { + FILE *output_file; + output_file = fopen(final_name, "w"); + if (output_file == NULL) { + printf("fopen output file: %s failed\n", final_name); + return; + } + unsigned char str[out_size]; + for (unsigned int i = 0; i < out_size; ++i) { + str[i] = output_data[i]; + fprintf(output_file, "%d\t", str[i]); + } + fclose(output_file); +} + +int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) { + char *inputs_path[inputs_num]; + char *delim = ","; + char *token; + int i = 0; + while ((token = strtok_r(path, delim, &path))) { + if (i >= inputs_num) { + printf("inputs num is error, need: %d\n", inputs_num); + return -1; + } + inputs_path[i] = token; + printf("input %d: %s\n", i, inputs_path[i]); + i++; + } + + for (i = 0; i < inputs_num; ++i) { + int size = 0; + buffers[i] = ReadInputData(inputs_path[i], &size); + if (size != inputs_size[i] || buffers[i] == NULL) { + printf("size mismatch, %s, input: %d, needed: %d\n", inputs_path[i], size, inputs_size[i]); + return -1; + } + } + return 0; +} + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h new file mode 100755 index 00000000000..909a4ac16b1 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/benchmark/load_input.h @@ -0,0 +1,36 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_ +#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void *ReadInputData(const char *real_input_path, int *size); + +void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size); + +int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num); + +#ifdef __cplusplus +} +#endif + +#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_ + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h new file mode 100755 index 00000000000..3039fa816bb --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/cell.h @@ -0,0 +1,133 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_CELL_H +#define MINDSPORE_INCLUDE_API_CELL_H +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/graph.h" + +namespace mindspore { +class InputAndOutput; +using Input = InputAndOutput; +using Output = InputAndOutput; + +class MS_API CellBase { + public: + CellBase() = default; + virtual ~CellBase() = default; + virtual std::vector Construct(const std::vector &inputs) { return {}; } + virtual std::shared_ptr Clone() const = 0; + virtual Status Run(const std::vector &inputs, std::vector *outputs) { return kSuccess; } + std::vector operator()(const std::vector &inputs) const; +}; + +template +class MS_API Cell : public CellBase { + public: + virtual ~Cell() = default; + std::shared_ptr Clone() const override { return std::make_shared(static_cast(*this)); } +}; + +class MS_API ParameterCell final : public Cell { + public: + ParameterCell() = default; + ~ParameterCell() override = default; + + ParameterCell(const ParameterCell &); + ParameterCell &operator=(const ParameterCell &); + + ParameterCell(ParameterCell &&); + ParameterCell &operator=(ParameterCell &&); + + explicit ParameterCell(const MSTensor &); + ParameterCell &operator=(const MSTensor &); + + explicit ParameterCell(MSTensor &&); + ParameterCell &operator=(MSTensor &&); + + MSTensor GetTensor() const { return tensor_; } + + private: + MSTensor tensor_; +}; + +class MS_API OpCellBase : public CellBase { + public: + explicit OpCellBase(const std::string &name) : name_(name) {} + ~OpCellBase() override = default; + const std::string &GetOpType() const { return name_; } + + protected: + std::string name_; +}; + +template +class MS_API OpCell : public OpCellBase, public std::enable_shared_from_this { + public: + explicit OpCell(const std::string &name) : OpCellBase(name) {} + ~OpCell() override = default; + std::shared_ptr Clone() const override { return std::make_shared(static_cast(*this)); } +}; + +class MS_API GraphCell final : public Cell { + public: + class GraphImpl; + + GraphCell() = default; + ~GraphCell() override = default; + + 
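+  // The constructors below bind an existing Graph; Run() executes it, and GetInputs()/GetOutputs() expose its input and output tensors.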
explicit GraphCell(const Graph &); + explicit GraphCell(Graph &&); + explicit GraphCell(const std::shared_ptr &); + + const std::shared_ptr &GetGraph() const { return graph_; } + Status Run(const std::vector &inputs, std::vector *outputs) override; + std::vector GetInputs(); + std::vector GetOutputs(); + + private: + friend class ModelImpl; + Status Load(); + + std::shared_ptr graph_; + std::shared_ptr executor_; +}; + +class MS_API InputAndOutput { + public: + InputAndOutput(); + ~InputAndOutput() = default; + + // no explicit + InputAndOutput(const MSTensor &); // NOLINT(runtime/explicit) + InputAndOutput(MSTensor &&); // NOLINT(runtime/explicit) + + InputAndOutput(const std::shared_ptr &, const std::vector &, int32_t index); + + int32_t GetIndex() const { return index_; } + void SetIndex(int32_t index) { index_ = index; } + + private: + std::shared_ptr cell_; + std::vector prev_; + int32_t index_; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CELL_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h new file mode 100755 index 00000000000..3f52d7ae9d9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/context.h @@ -0,0 +1,185 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H +#define MINDSPORE_INCLUDE_API_CONTEXT_H + +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +constexpr auto kDeviceTypeAscend310 = "Ascend310"; +constexpr auto kDeviceTypeAscend910 = "Ascend910"; +constexpr auto kDeviceTypeGPU = "GPU"; + +struct MS_API Context { + public: + Context(); + virtual ~Context() = default; + struct Data; + std::shared_ptr data; +}; + +struct MS_API GlobalContext : public Context { + public: + static std::shared_ptr GetGlobalContext(); + + static inline void SetGlobalDeviceTarget(const std::string &device_target); + static inline std::string GetGlobalDeviceTarget(); + + static void SetGlobalDeviceID(const uint32_t &device_id); + static uint32_t GetGlobalDeviceID(); + + static inline void SetGlobalDumpConfigPath(const std::string &cfg_path); + static inline std::string GetGlobalDumpConfigPath(); + + private: + // api without std::string + static void SetGlobalDeviceTarget(const std::vector &device_target); + static std::vector GetGlobalDeviceTargetChar(); + + static void SetGlobalDumpConfigPath(const std::vector &cfg_path); + static std::vector GetGlobalDumpConfigPathChar(); +}; + +struct MS_API ModelContext : public Context { + public: + static inline void SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path); + static inline std::string GetInsertOpConfigPath(const std::shared_ptr &context); + + static inline void SetInputFormat(const std::shared_ptr &context, const std::string &format); + static inline std::string GetInputFormat(const std::shared_ptr &context); + + static inline void SetInputShape(const std::shared_ptr &context, const std::string &shape); + static inline std::string GetInputShape(const std::shared_ptr &context); + + static void SetInputShapeMap(const std::shared_ptr &context, const std::map> &shape); + static std::map> GetInputShapeMap(const std::shared_ptr &context); + + static void SetDynamicBatchSize(const std::shared_ptr &context, + const std::vector &dynamic_batch_size); + static inline std::string GetDynamicBatchSize(const std::shared_ptr &context); + + static void SetOutputType(const std::shared_ptr &context, enum DataType output_type); + static enum DataType GetOutputType(const std::shared_ptr &context); + + static inline void SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode); + static inline std::string GetPrecisionMode(const std::shared_ptr &context); + + static inline void SetOpSelectImplMode(const std::shared_ptr &context, + const std::string &op_select_impl_mode); + static inline std::string GetOpSelectImplMode(const std::shared_ptr &context); + + static inline void SetFusionSwitchConfigPath(const std::shared_ptr &context, const std::string &cfg_path); + static inline std::string GetFusionSwitchConfigPath(const std::shared_ptr &context); + + static inline void SetGpuTrtInferMode(const std::shared_ptr &context, const std::string &gpu_trt_infer_mode); + static inline std::string GetGpuTrtInferMode(const std::shared_ptr &context); + + private: + // api without std::string + static void SetInsertOpConfigPath(const std::shared_ptr &context, const std::vector &cfg_path); + static std::vector GetInsertOpConfigPathChar(const std::shared_ptr &context); + + static void SetInputFormat(const std::shared_ptr &context, const std::vector &format); + static std::vector GetInputFormatChar(const std::shared_ptr &context); + + static void 
SetInputShape(const std::shared_ptr &context, const std::vector &shape); + static std::vector GetInputShapeChar(const std::shared_ptr &context); + + static void SetPrecisionMode(const std::shared_ptr &context, const std::vector &precision_mode); + static std::vector GetPrecisionModeChar(const std::shared_ptr &context); + + static void SetOpSelectImplMode(const std::shared_ptr &context, + const std::vector &op_select_impl_mode); + static std::vector GetOpSelectImplModeChar(const std::shared_ptr &context); + + static void SetFusionSwitchConfigPath(const std::shared_ptr &context, const std::vector &cfg_path); + static std::vector GetFusionSwitchConfigPathChar(const std::shared_ptr &context); + + static void SetGpuTrtInferMode(const std::shared_ptr &context, const std::vector &gpu_trt_infer_mode); + static std::vector GetGpuTrtInferModeChar(const std::shared_ptr &context); + static std::vector GetDynamicBatchSizeChar(const std::shared_ptr &context); +}; + +void GlobalContext::SetGlobalDeviceTarget(const std::string &device_target) { + SetGlobalDeviceTarget(StringToChar(device_target)); +} +std::string GlobalContext::GetGlobalDeviceTarget() { return CharToString(GetGlobalDeviceTargetChar()); } + +void GlobalContext::SetGlobalDumpConfigPath(const std::string &cfg_path) { + SetGlobalDumpConfigPath(StringToChar(cfg_path)); +} +std::string GlobalContext::GetGlobalDumpConfigPath() { return CharToString(GetGlobalDumpConfigPathChar()); } + +void ModelContext::SetInsertOpConfigPath(const std::shared_ptr &context, const std::string &cfg_path) { + SetInsertOpConfigPath(context, StringToChar(cfg_path)); +} +std::string ModelContext::GetInsertOpConfigPath(const std::shared_ptr &context) { + return CharToString(GetInsertOpConfigPathChar(context)); +} + +void ModelContext::SetInputFormat(const std::shared_ptr &context, const std::string &format) { + SetInputFormat(context, StringToChar(format)); +} +std::string ModelContext::GetInputFormat(const std::shared_ptr &context) { + return CharToString(GetInputFormatChar(context)); +} + +void ModelContext::SetInputShape(const std::shared_ptr &context, const std::string &shape) { + SetInputShape(context, StringToChar(shape)); +} +std::string ModelContext::GetInputShape(const std::shared_ptr &context) { + return CharToString(GetInputShapeChar(context)); +} + +void ModelContext::SetPrecisionMode(const std::shared_ptr &context, const std::string &precision_mode) { + SetPrecisionMode(context, StringToChar(precision_mode)); +} +std::string ModelContext::GetPrecisionMode(const std::shared_ptr &context) { + return CharToString(GetPrecisionModeChar(context)); +} + +void ModelContext::SetOpSelectImplMode(const std::shared_ptr &context, + const std::string &op_select_impl_mode) { + SetOpSelectImplMode(context, StringToChar(op_select_impl_mode)); +} +std::string ModelContext::GetOpSelectImplMode(const std::shared_ptr &context) { + return CharToString(GetOpSelectImplModeChar(context)); +} + +void ModelContext::SetFusionSwitchConfigPath(const std::shared_ptr &context, const std::string &cfg_path) { + SetFusionSwitchConfigPath(context, StringToChar(cfg_path)); +} +std::string ModelContext::GetFusionSwitchConfigPath(const std::shared_ptr &context) { + return CharToString(GetFusionSwitchConfigPathChar(context)); +} + +std::string ModelContext::GetDynamicBatchSize(const std::shared_ptr &context) { + return CharToString(GetDynamicBatchSizeChar(context)); +} + +void ModelContext::SetGpuTrtInferMode(const std::shared_ptr &context, const std::string &gpu_trt_infer_mode) { + 
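+  // Forward the std::string argument to the private overload via StringToChar (see dual_abi_helper.h), keeping the exported interface free of std::string for dual-ABI safety.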
SetGpuTrtInferMode(context, StringToChar(gpu_trt_infer_mode)); +} +std::string ModelContext::GetGpuTrtInferMode(const std::shared_ptr &context) { + return CharToString(GetGpuTrtInferModeChar(context)); +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_CONTEXT_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h new file mode 100755 index 00000000000..a39488a83d3 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/data_type.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_ +#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_ + +namespace mindspore { +enum class DataType : int { + kTypeUnknown = 0, + kObjectTypeString = 12, + kObjectTypeList = 13, + kObjectTypeTuple = 14, + kObjectTypeTensorType = 17, + kNumberTypeBool = 30, + kNumberTypeInt8 = 32, + kNumberTypeInt16 = 33, + kNumberTypeInt32 = 34, + kNumberTypeInt64 = 35, + kNumberTypeUInt8 = 37, + kNumberTypeUInt16 = 38, + kNumberTypeUInt32 = 39, + kNumberTypeUInt64 = 40, + kNumberTypeFloat16 = 42, + kNumberTypeFloat32 = 43, + kNumberTypeFloat64 = 44, + kNumberTypeEnd = 46, + // add new enum here + kInvalidType = INT32_MAX, +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_DATA_TYPE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h new file mode 100755 index 00000000000..7d56d5ac72d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/dual_abi_helper.h @@ -0,0 +1,164 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ +#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace mindspore { +inline std::vector StringToChar(const std::string &s) { return std::vector(s.begin(), s.end()); } + +inline std::string CharToString(const std::vector &c) { return std::string(c.begin(), c.end()); } + +inline std::optional> OptionalStringToChar(const std::optional &s) { + if (s == std::nullopt) return std::nullopt; + std::optional> ret = std::vector(s->begin(), s->end()); + return ret; +} + +inline std::optional OptionalCharToString(const std::optional> &c) { + if (c == std::nullopt) return std::nullopt; + std::optional ret = std::string(c->begin(), c->end()); + return ret; +} + +inline std::pair, int32_t> PairStringToChar(const std::pair &s) { + return std::pair, int32_t>(std::vector(s.first.begin(), s.first.end()), s.second); +} + +inline std::pair PairCharToString(const std::pair, int32_t> &c) { + return std::pair(std::string(c.first.begin(), c.first.end()), c.second); +} + +inline std::vector> VectorStringToChar(const std::vector &s) { + std::vector> ret; + std::transform(s.begin(), s.end(), std::back_inserter(ret), + [](auto str) { return std::vector(str.begin(), str.end()); }); + return ret; +} + +inline std::vector VectorCharToString(const std::vector> &c) { + std::vector ret; + std::transform(c.begin(), c.end(), std::back_inserter(ret), + [](auto ch) { return std::string(ch.begin(), ch.end()); }); + return ret; +} + +inline std::set> SetStringToChar(const std::set &s) { + std::set> ret; + std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), + [](auto str) { return std::vector(str.begin(), str.end()); }); + return ret; +} + +inline std::set SetCharToString(const std::set> &c) { + std::set ret; + std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), + [](auto ch) { return std::string(ch.begin(), ch.end()); }); + return ret; +} + +inline std::map, int32_t> MapStringToChar(const std::map &s) { + std::map, int32_t> ret; + std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) { + return std::pair, int32_t>(std::vector(str.first.begin(), str.first.end()), str.second); + }); + return ret; +} + +inline std::map MapCharToString(const std::map, int32_t> &c) { + std::map ret; + std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) { + return std::pair(std::string(ch.first.begin(), ch.first.end()), ch.second); + }); + return ret; +} + +inline std::map, std::vector> UnorderedMapStringToChar( + const std::unordered_map &s) { + std::map, std::vector> ret; + std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) { + return std::pair, std::vector>(std::vector(str.first.begin(), str.first.end()), + std::vector(str.second.begin(), str.second.end())); + }); + return ret; +} + +inline std::unordered_map UnorderedMapCharToString( + const std::map, std::vector> &c) { + std::unordered_map ret; + std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) { + return std::pair(std::string(ch.first.begin(), ch.first.end()), + std::string(ch.second.begin(), ch.second.end())); + }); + return ret; +} + +inline std::vector, std::vector>> ClassIndexStringToChar( + const std::vector>> &s) { + std::vector, std::vector>> ret; + std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) { + return std::pair, std::vector>(std::vector(str.first.begin(), 
str.first.end()), + str.second); + }); + return ret; +} + +inline std::vector>> ClassIndexCharToString( + const std::vector, std::vector>> &c) { + std::vector>> ret; + std::transform(c.begin(), c.end(), std::back_inserter(ret), [](auto ch) { + return std::pair>(std::string(ch.first.begin(), ch.first.end()), ch.second); + }); + return ret; +} + +template +inline std::map, T> PadInfoStringToChar(const std::map &s_pad_info) { + std::map, T> ret; + std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()), [](auto str) { + return std::pair, T>(std::vector(str.first.begin(), str.first.end()), str.second); + }); + return ret; +} + +template +inline std::map PadInfoCharToString(const std::map, T> &c_pad_info) { + std::map ret; + std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](auto ch) { + return std::pair(std::string(ch.first.begin(), ch.first.end()), ch.second); + }); + return ret; +} + +template +inline void TensorMapCharToString(const std::map, T> *c, std::unordered_map *s) { + for (auto ch : *c) { + auto key = std::string(ch.first.begin(), ch.first.end()); + auto val = ch.second; + s->insert(std::pair(key, val)); + } +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h new file mode 100755 index 00000000000..892f60495a8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/graph.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_GRAPH_H +#define MINDSPORE_INCLUDE_API_GRAPH_H + +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" + +namespace mindspore { +class MS_API Graph { + public: + class GraphData; + explicit Graph(const std::shared_ptr &graph_data); + explicit Graph(std::shared_ptr &&graph_data); + explicit Graph(std::nullptr_t); + ~Graph(); + + enum ModelType ModelType() const; + bool operator==(std::nullptr_t) const; + + private: + friend class GraphCell; + friend class ModelImpl; + std::shared_ptr graph_data_; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_GRAPH_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h new file mode 100755 index 00000000000..bb06cff782a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/lite_context.h @@ -0,0 +1,71 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_LITE_CONTEXT_H +#define MINDSPORE_INCLUDE_API_LITE_CONTEXT_H + +#include +#include +#include +#include +#include "include/api/types.h" +#include "include/lite_types.h" + +namespace mindspore { +namespace lite { +class Allocator; +} // namespace lite + +struct MS_API Context { + public: + static void Clear(const std::shared_ptr &context); + + static void SetAsDefault(const std::shared_ptr &context); + + static void SetVendorName(const std::shared_ptr &context, const std::string &name); + static std::string GetVendorName(const std::shared_ptr &context); + + static void SetThreadNum(const std::shared_ptr &context, int num); + static int GetThreadNum(const std::shared_ptr &context); + + static void SetAllocator(const std::shared_ptr &context, std::shared_ptr alloc); + static std::shared_ptr GetAllocator(const std::shared_ptr &context); + + static void ConfigCPU(const std::shared_ptr &context, bool config); + static bool IfCPUEnabled(const std::shared_ptr &context); + + static void ConfigCPUFp16(const std::shared_ptr &context, bool config); + static bool IfCPUFp16Enabled(const std::shared_ptr &context); + + static void SetCPUBindMode(const std::shared_ptr &context, lite::CpuBindMode mode); + static lite::CpuBindMode GetCPUBindMode(const std::shared_ptr &context); + + static void ConfigGPU(const std::shared_ptr &context, bool config); + static bool IfGPUEnabled(const std::shared_ptr &context); + + static void ConfigGPUFp16(const std::shared_ptr &context, bool config); + static bool IfGPUFp16Enabled(const std::shared_ptr &context); + + static void ConfigNPU(const std::shared_ptr &context, bool config); + static bool IfNPUEnabled(const std::shared_ptr &context); + + static void SetNPUFrequency(const std::shared_ptr &context, int freq); + static int GetNPUFrequency(const std::shared_ptr &context); + + private: + std::map context_; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_LITE_CONTEXT_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/model.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/model.h new file mode 100755 index 00000000000..78f202fae27 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/model.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_MODEL_H +#define MINDSPORE_INCLUDE_API_MODEL_H + +#include +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/graph.h" +#include "include/api/cell.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +class ModelImpl; +struct Context; + +class MS_API Model { + public: + explicit Model(const std::vector &network, const std::shared_ptr &model_context = nullptr); + explicit Model(const GraphCell &graph, const std::shared_ptr &model_context = nullptr); + ~Model(); + Model(const Model &) = delete; + void operator=(const Model &) = delete; + + Status Build(); + Status Resize(const std::vector &inputs, const std::vector> &dims); + + Status Predict(const std::vector &inputs, std::vector *outputs); + + std::vector GetInputs(); + std::vector GetOutputs(); + + static inline bool CheckModelSupport(const std::string &device_type, ModelType model_type); + + private: + // api without std::string + static bool CheckModelSupport(const std::vector &device_type, ModelType model_type); + std::shared_ptr impl_; +}; + +bool Model::CheckModelSupport(const std::string &device_type, ModelType model_type) { + return CheckModelSupport(StringToChar(device_type), model_type); +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_MODEL_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/ops/ops.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/ops/ops.h new file mode 100755 index 00000000000..5e56c17377d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/ops/ops.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_OPS_OPS_H +#define MINDSPORE_INCLUDE_API_OPS_OPS_H + +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/cell.h" + +namespace mindspore { +struct MS_API Conv2D : public OpCell { + Conv2D() : OpCell("Conv2D") {} + ~Conv2D() override = default; + std::vector Construct(const std::vector &inputs) override; + Conv2D(int out_channel, const std::vector &kernel_size, int mode = 1, const std::string &pad_mode = "valid", + const std::vector &pad = {0, 0, 0, 0}, const std::vector &stride = {1, 1, 1, 1}, + const std::vector &dilation = {1, 1, 1, 1}, int group = 1); + + Output operator()(const Input &, const Input &) const; + + int out_channel; + std::vector kernel_size; + int mode = 1; + std::string pad_mode = "valid"; + std::vector pad = {0, 0, 0, 0}; + std::vector stride = {1, 1, 1, 1}; + std::vector dilation = {1, 1, 1, 1}; + int group = 1; +}; +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_OPS_OPS_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/serialization.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/serialization.h new file mode 100755 index 00000000000..c5fb61eb07d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/serialization.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_SERIALIZATION_H +#define MINDSPORE_INCLUDE_API_SERIALIZATION_H + +#include +#include +#include +#include +#include "include/api/status.h" +#include "include/api/types.h" +#include "include/api/model.h" +#include "include/api/graph.h" +#include "include/api/dual_abi_helper.h" + +namespace mindspore { +class MS_API Serialization { + public: + static Graph LoadModel(const void *model_data, size_t data_size, ModelType model_type); + inline static Graph LoadModel(const std::string &file, ModelType model_type); + static Status LoadCheckPoint(const std::string &ckpt_file, std::map *parameters); + static Status SetParameters(const std::map ¶meters, Model *model); + static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data); + static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file); + + private: + static Graph LoadModel(const std::vector &file, ModelType model_type); +}; + +Graph Serialization::LoadModel(const std::string &file, ModelType model_type) { + return LoadModel(StringToChar(file), model_type); +} +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/status.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/status.h new file mode 100755 index 00000000000..3a5c7c4e018 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/status.h @@ -0,0 +1,164 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_INCLUDE_API_STATUS_H +#define MINDSPORE_INCLUDE_API_STATUS_H + +#include +#include +#include +#include +#include +#include "include/api/dual_abi_helper.h" +#include "include/api/types.h" + +namespace mindspore { +enum CompCode : uint32_t { + kCore = 0x00000000u, + kMD = 0x10000000u, + kME = 0x20000000u, + kMC = 0x30000000u, + kLite = 0xF0000000u, +}; + +enum StatusCode : uint32_t { + kSuccess = 0, + // Core + kCoreFailed = kCore | 0x1, + + // MD + kMDOutOfMemory = kMD | 1, + kMDShapeMisMatch = kMD | 2, + kMDInterrupted = kMD | 3, + kMDNoSpace = kMD | 4, + kMDPyFuncException = kMD | 5, + kMDDuplicateKey = kMD | 6, + kMDPythonInterpreterFailure = kMD | 7, + kMDTDTPushFailure = kMD | 8, + kMDFileNotExist = kMD | 9, + kMDProfilingError = kMD | 10, + kMDBoundingBoxOutOfBounds = kMD | 11, + kMDBoundingBoxInvalidShape = kMD | 12, + kMDSyntaxError = kMD | 13, + kMDTimeOut = kMD | 14, + kMDBuddySpaceFull = kMD | 15, + kMDNetWorkError = kMD | 16, + kMDNotImplementedYet = kMD | 17, + // Make this error code the last one. Add new error code above it. 
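Serialization above is what turns a serialized MindIR blob into the Graph consumed by Model. On the STM32 target there is no filesystem, so the (pointer, size) overload is the relevant one; the std::string overload is a host-side convenience. A hedged sketch, with an illustrative flash-resident symbol name:

    // Hedged sketch: loading a model with the overloads declared above.
    // g_mnist_mindir / g_mnist_mindir_len are illustrative names for a model blob linked into flash.
    extern const unsigned char g_mnist_mindir[];
    extern const unsigned int g_mnist_mindir_len;

    mindspore::Graph graph =
        mindspore::Serialization::LoadModel(g_mnist_mindir, g_mnist_mindir_len, mindspore::kMindIR);
    // Host-side equivalent:  mindspore::Serialization::LoadModel("mnist.mindir", mindspore::kMindIR);
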
+ kMDUnexpectedError = kMD | 127, + + // ME + kMEFailed = kME | 0x1, + kMEInvalidInput = kME | 0x2, + + // MC + kMCFailed = kMC | 0x1, + kMCDeviceError = kMC | 0x2, + kMCInvalidInput = kMC | 0x3, + kMCInvalidArgs = kMC | 0x4, + + // Lite // Common error code, range: [-1, -100) + kLiteError = kLite | (0x0FFFFFFF & -1), /**< Common error code. */ + kLiteNullptr = kLite | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/ + kLiteParamInvalid = kLite | (0x0FFFFFFF & -3), /**< Invalid parameter.*/ + kLiteNoChange = kLite | (0x0FFFFFFF & -4), /**< No change. */ + kLiteSuccessExit = kLite | (0x0FFFFFFF & -5), /**< No error but exit. */ + kLiteMemoryFailed = kLite | (0x0FFFFFFF & -6), /**< Fail to create memory. */ + kLiteNotSupport = kLite | (0x0FFFFFFF & -7), /**< Fail to support. */ + kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8), /**< Error occur in thread pool. */ + + // Executor error code, range: [-100,-200) + kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */ + kLiteInputTensorError = kLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */ + kLiteReentrantError = kLite | (0x0FFFFFFF & -102), /**< Exist executor running. */ + + // Graph error code, range: [-200,-300) + kLiteGraphFileError = kLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */ + + // Node error code, range: [-300,-400) + kLiteNotFindOp = kLite | (0x0FFFFFFF & -300), /**< Failed to find operator. */ + kLiteInvalidOpName = kLite | (0x0FFFFFFF & -301), /**< Invalid operator name. */ + kLiteInvalidOpAttr = kLite | (0x0FFFFFFF & -302), /**< Invalid operator attr. */ + kLiteOpExecuteFailure = kLite | (0x0FFFFFFF & -303), /**< Failed to execution operator. */ + + // Tensor error code, range: [-400,-500) + kLiteFormatError = kLite | (0x0FFFFFFF & -400), /**< Failed to checking tensor format. */ + + // InferShape error code, range: [-500,-600) + kLiteInferError = kLite | (0x0FFFFFFF & -500), /**< Failed to infer shape. */ + kLiteInferInvalid = kLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */ + + // User input param error code, range: [-600, 700) + kLiteInputParamInvalid = kLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. 
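The Lite status codes above pack two things into one 32-bit value: the component id in the top nibble (kLite = 0xF0000000) and the negative RET_* value from include/errorcode.h in the low 28 bits, truncated by the 0x0FFFFFFF mask. Sign-extending those 28 bits recovers the original RET_* code. A hedged helper, not part of the API, showing the layout on a two's-complement target:

    // Hedged helper (illustrative only): splitting a composite StatusCode back apart.
    #include <cstdint>

    inline uint32_t StatusComponent(uint32_t code) { return code & 0xF0000000u; }  // kCore/kMD/kME/kMC/kLite
    inline int32_t LiteErrorOf(uint32_t code) {
      // Shift the 28-bit payload to the top, then arithmetic-shift back to sign-extend it.
      return static_cast<int32_t>(code << 4) >> 4;
    }
    // Example: LiteErrorOf(kLiteInputTensorError) == -101, i.e. RET_INPUT_TENSOR_ERROR.
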
*/ +}; + +class MS_API Status { + public: + Status(); + inline Status(enum StatusCode status_code, const std::string &status_msg = ""); // NOLINT(runtime/explicit) + inline Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = ""); + + ~Status() = default; + + enum StatusCode StatusCode() const; + inline std::string ToString() const; + + int GetLineOfCode() const; + inline std::string GetErrDescription() const; + inline std::string SetErrDescription(const std::string &err_description); + + friend std::ostream &operator<<(std::ostream &os, const Status &s); + + bool operator==(const Status &other) const; + bool operator==(enum StatusCode other_code) const; + bool operator!=(const Status &other) const; + bool operator!=(enum StatusCode other_code) const; + + explicit operator bool() const; + explicit operator int() const; + + static Status OK(); + + bool IsOk() const; + + bool IsError() const; + + static inline std::string CodeAsString(enum StatusCode c); + + private: + // api without std::string + explicit Status(enum StatusCode status_code, const std::vector &status_msg); + Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::vector &extra); + std::vector ToCString() const; + std::vector GetErrDescriptionChar() const; + std::vector SetErrDescription(const std::vector &err_description); + static std::vector CodeAsCString(enum StatusCode c); + + struct Data; + std::shared_ptr data_; +}; + +Status::Status(enum StatusCode status_code, const std::string &status_msg) + : Status(status_code, StringToChar(status_msg)) {} +Status::Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra) + : Status(code, line_of_code, file_name, StringToChar(extra)) {} +std::string Status::ToString() const { return CharToString(ToCString()); } +std::string Status::GetErrDescription() const { return CharToString(GetErrDescriptionChar()); } +std::string Status::SetErrDescription(const std::string &err_description) { + return CharToString(SetErrDescription(StringToChar(err_description))); +} +std::string Status::CodeAsString(enum StatusCode c) { return CharToString(CodeAsCString(c)); } +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_STATUS_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/types.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/types.h new file mode 100755 index 00000000000..482159d8c05 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/api/types.h @@ -0,0 +1,122 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_INCLUDE_API_TYPES_H +#define MINDSPORE_INCLUDE_API_TYPES_H + +#include +#include +#include +#include +#include "include/api/data_type.h" +#include "include/api/dual_abi_helper.h" + +#ifdef _WIN32 +#define MS_API __declspec(dllexport) +#else +#define MS_API __attribute__((visibility("default"))) +#endif + +namespace mindspore { +enum ModelType : uint32_t { + kMindIR = 0, + kAIR = 1, + kOM = 2, + kONNX = 3, + // insert new data type here + kUnknownType = 0xFFFFFFFF +}; + +class MS_API MSTensor { + public: + class Impl; + + static inline MSTensor CreateTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static inline MSTensor CreateRefTensor(const std::string &name, DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + + MSTensor(); + explicit MSTensor(const std::shared_ptr &impl); + inline MSTensor(const std::string &name, DataType type, const std::vector &shape, const void *data, + size_t data_len); + ~MSTensor(); + + inline std::string Name() const; + enum DataType DataType() const; + const std::vector &Shape() const; + int64_t ElementNum() const; + + std::shared_ptr Data() const; + void *MutableData(); + size_t DataSize() const; + + bool IsDevice() const; + + MSTensor Clone() const; + bool operator==(std::nullptr_t) const; + + private: + // api without std::string + static MSTensor CreateTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + static MSTensor CreateRefTensor(const std::vector &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept; + MSTensor(const std::vector &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len); + std::vector CharName() const; + + friend class ModelImpl; + explicit MSTensor(std::nullptr_t); + std::shared_ptr impl_; +}; + +class MS_API Buffer { + public: + Buffer(); + Buffer(const void *data, size_t data_len); + ~Buffer(); + + const void *Data() const; + void *MutableData(); + size_t DataSize() const; + + bool ResizeData(size_t data_len); + bool SetData(const void *data, size_t data_len); + + Buffer Clone() const; + + private: + class Impl; + std::shared_ptr impl_; +}; + +MSTensor MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + return CreateTensor(StringToChar(name), type, shape, data, data_len); +} + +MSTensor MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector &shape, + const void *data, size_t data_len) noexcept { + return CreateRefTensor(StringToChar(name), type, shape, data, data_len); +} + +MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector &shape, const void *data, + size_t data_len) + : MSTensor(StringToChar(name), type, shape, data, data_len) {} + +std::string MSTensor::Name() const { return CharToString(CharName()); } +} // namespace mindspore +#endif // MINDSPORE_INCLUDE_API_TYPES_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/context.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/context.h new file mode 100755 index 00000000000..2623a341890 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/context.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_CONTEXT_H_ +#define MINDSPORE_LITE_INCLUDE_CONTEXT_H_ + +#include "include/ms_tensor.h" +#include "include/lite_utils.h" +#include "include/lite_types.h" + +namespace mindspore::lite { +/// \brief CpuDeviceInfo defined for CPU's configuration information. +typedef struct { + bool enable_float16_ = false; /**< prior enable float16 inference */ + CpuBindMode cpu_bind_mode_ = MID_CPU; +} CpuDeviceInfo; + +/// \brief GpuDeviceInfo defined for GPU's configuration information. +typedef struct { + bool enable_float16_ = false; /**< prior enable float16 inference */ +} GpuDeviceInfo; + +/// \brief NpuDeviceInfo defined for NPU's configuration information. +typedef struct { + int frequency_ = 3; /**< npu frequency inference */ +} NpuDeviceInfo; + +/// \brief DeviceInfo defined for backend's configuration information. +union DeviceInfo { + CpuDeviceInfo cpu_device_info_; + GpuDeviceInfo gpu_device_info_; + NpuDeviceInfo npu_device_info_; +}; + +/// \brief DeviceContext defined for holding backend's configuration information. +struct DeviceContext { + DeviceType device_type_ = DT_CPU; + DeviceInfo device_info_; +}; + +/// \brief Context defined for holding environment variables during runtime. +struct Context { + String vendor_name_; + int thread_num_ = 2; /**< thread number config for thread pool */ + AllocatorPtr allocator = nullptr; +#ifndef NOT_USE_STL + DeviceContextVector device_list_ = {{DT_CPU, {false, MID_CPU}}}; +#else + DeviceContextVector device_list_; +#endif // NOT_USE_STL +}; +} // namespace mindspore::lite +#endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/errorcode.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/errorcode.h new file mode 100755 index 00000000000..9ff4e093795 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/errorcode.h @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ +#define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ + +#include "include/lite_utils.h" + +namespace mindspore { +namespace lite { +/// \brief STATUS defined for holding error code in MindSpore Lite. +using STATUS = int; + +/* Success */ +constexpr int RET_OK = 0; /**< No error occurs. */ + +/* Common error code, range: [-1, -100)*/ +constexpr int RET_ERROR = -1; /**< Common error code. 
*/ +constexpr int RET_NULL_PTR = -2; /**< NULL pointer returned.*/ +constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter.*/ +constexpr int RET_NO_CHANGE = -4; /**< No change. */ +constexpr int RET_SUCCESS_EXIT = -5; /**< No error but exit. */ +constexpr int RET_MEMORY_FAILED = -6; /**< Fail to create memory. */ +constexpr int RET_NOT_SUPPORT = -7; /**< Fail to support. */ +constexpr int RET_THREAD_POOL_ERROR = -8; /**< Error occur in thread pool. */ + +/* Executor error code, range: [-100,-200) */ +constexpr int RET_OUT_OF_TENSOR_RANGE = -100; /**< Failed to check range. */ +constexpr int RET_INPUT_TENSOR_ERROR = -101; /**< Failed to check input tensor. */ +constexpr int RET_REENTRANT_ERROR = -102; /**< Exist executor running. */ + +/* Graph error code, range: [-200,-300) */ +constexpr int RET_GRAPH_FILE_ERR = -200; /**< Failed to verify graph file. */ + +/* Node error code, range: [-300,-400) */ +constexpr int RET_NOT_FIND_OP = -300; /**< Failed to find operator. */ +constexpr int RET_INVALID_OP_NAME = -301; /**< Invalid operator name. */ +constexpr int RET_INVALID_OP_ATTR = -302; /**< Invalid operator attr. */ +constexpr int RET_OP_EXECUTE_FAILURE = -303; /**< Failed to execution operator. */ + +/* Tensor error code, range: [-400,-500) */ +constexpr int RET_FORMAT_ERR = -400; /**< Failed to checking tensor format. */ + +/* InferShape error code, range: [-500,-600) */ +constexpr int RET_INFER_ERR = -500; /**< Failed to infer shape. */ +constexpr int RET_INFER_INVALID = -501; /**< Invalid infer shape before runtime. */ + +/* User input param error code, range: [-600, 700)*/ +constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user. */ + +/// \brief Print description of errorcode. +/// +/// \param[in] error_code define return status of procedure. +/// +/// \return String of errorcode info. +String GetErrorInfo(STATUS error_code); + +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_ERRORCODE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ir/dtype/type_id.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ir/dtype/type_id.h new file mode 100755 index 00000000000..46209b8ba43 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ir/dtype/type_id.h @@ -0,0 +1,95 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
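include/context.h and include/errorcode.h above are the two pieces of session setup glue: the Context selects the backend and thread count, and the RET_* constants (with GetErrorInfo()) are how every call reports failure. Two short hedged sketches, written for the single-core STM32F746:

    // Hedged sketch 1: a minimal lite::Context for one Cortex-M7 core.
    #include <cstdio>
    #include "include/context.h"
    #include "include/errorcode.h"

    mindspore::lite::Context BuildContext() {
      mindspore::lite::Context context;
      context.thread_num_ = 1;  // one core, one thread

      mindspore::lite::DeviceContext cpu_ctx;
      cpu_ctx.device_type_ = mindspore::lite::DT_CPU;
      cpu_ctx.device_info_.cpu_device_info_.enable_float16_ = false;  // no FP16 path on this target
      cpu_ctx.device_info_.cpu_device_info_.cpu_bind_mode_ = mindspore::lite::NO_BIND;

      context.device_list_.clear();
      context.device_list_.push_back(cpu_ctx);
      return context;
    }

    // Hedged sketch 2: mapping a RET_* status to a printable message.
    static void LogIfFailed(int ret, const char *what) {
      if (ret != mindspore::lite::RET_OK) {
        std::printf("%s failed (%d): %s\n", what, ret, mindspore::lite::GetErrorInfo(ret).c_str());
      }
    }
    // usage:  LogIfFailed(session->CompileGraph(model), "CompileGraph");
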
+ */ + +#ifndef MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ +#define MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ + +namespace mindspore { +// +// Supported meta type +// +enum TypeId : int { + kTypeUnknown = 0, + kMetaTypeBegin = kTypeUnknown, + kMetaTypeType, // Type + kMetaTypeAnything, + kMetaTypeObject, + kMetaTypeTypeType, // TypeType + kMetaTypeProblem, + kMetaTypeExternal, + kMetaTypeNone, + kMetaTypeNull, + kMetaTypeEllipsis, + kMetaTypeEnd, + // + // Object types + // + kObjectTypeBegin = kMetaTypeEnd, + kObjectTypeNumber, + kObjectTypeString, + kObjectTypeList, + kObjectTypeTuple, + kObjectTypeSlice, + kObjectTypeKeyword, + kObjectTypeTensorType, + kObjectTypeRowTensorType, + kObjectTypeSparseTensorType, + kObjectTypeUndeterminedType, + kObjectTypeClass, + kObjectTypeDictionary, + kObjectTypeFunction, + kObjectTypeJTagged, + kObjectTypeSymbolicKeyType, + kObjectTypeEnvType, + kObjectTypeRefKey, + kObjectTypeRef, + kObjectTypeEnd, + // + // Number Types + // + kNumberTypeBegin = kObjectTypeEnd, + kNumberTypeBool, + kNumberTypeInt, + kNumberTypeInt8, + kNumberTypeInt16, + kNumberTypeInt32, + kNumberTypeInt64, + kNumberTypeUInt, + kNumberTypeUInt8, + kNumberTypeUInt16, + kNumberTypeUInt32, + kNumberTypeUInt64, + kNumberTypeFloat, + kNumberTypeFloat16, + kNumberTypeFloat32, + kNumberTypeFloat64, + kNumberTypeComplex64, + kNumberTypeEnd, + // + // Monad Types + // + // Monad types is placed at the end of enum, + // in order to keep fit with the type of existing model on the lite side. + kMonadTypeBegin = kNumberTypeEnd, + kObjectTypeMonad, + kObjectTypeUMonad, + kObjectTypeIOMonad, + kMonadTypeEnd +}; +} // namespace mindspore +#endif // MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_session.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_session.h new file mode 100755 index 00000000000..c3ad559339f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_session.h @@ -0,0 +1,125 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_SESSION_H +#define MINDSPORE_LITE_INCLUDE_LITE_SESSION_H + +#ifndef NOT_USE_STL +#include +#endif // NOT_USE_STL +#include "include/ms_tensor.h" +#include "include/model.h" +#include "include/context.h" + +namespace mindspore { +namespace session { +/// \brief LiteSession defined session in MindSpore Lite for compiling Model and forwarding model. +class MS_API LiteSession { + public: + /// \brief Static method to create a LiteSession pointer. + /// + /// \param[in] context Define the context of session to be created. + /// + /// \return Pointer of MindSpore Lite LiteSession. + static LiteSession *CreateSession(const lite::Context *context); + + /// \brief Static method to create a LiteSession pointer which has already compiled a model. + /// + /// \param[in] model_buf Define the buffer read from a model file. 
+ /// \param[in] size Define bytes number of model buffer. + /// \param[in] context Define the context of session to be created. + /// + /// \return Pointer of MindSpore Lite LiteSession. + static LiteSession *CreateSession(const char *model_buf, size_t size, const lite::Context *context); + + /// \brief Destructor of MindSpore Lite LiteSession. + virtual ~LiteSession() = default; + + /// \brief Attempt to bind or unbind threads in the thread pool to or from the specified cpu core. + /// + /// \param[in] if_bind Define whether to bind or unbind threads. + virtual void BindThread(bool if_bind) = 0; + + /// \brief Compile MindSpore Lite model. + /// + /// \note CompileGraph should be called before RunGraph. + /// + /// \param[in] model Define the model to be compiled. + /// + /// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h. + virtual int CompileGraph(lite::Model *model) = 0; + + /// \brief Get input MindSpore Lite MSTensors of model. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual Vector GetInputs() const = 0; + + /// \brief Get input MindSpore Lite MSTensors of model by tensor name. + /// + /// \param[in] node_name Define tensor name. + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const = 0; + + /// \brief Run session with callback. + /// + /// \param[in] before Define a call_back_function to be called before running each node. + /// \param[in] after Define a call_back_function called after running each node. + /// + /// \note RunGraph should be called after CompileGraph. + /// + /// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h. + virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0; + + /// \brief Get output MindSpore Lite MSTensors of model by node name. + /// + /// \param[in] node_name Define node name. + /// + /// \note Deprecated, replace with GetOutputByTensorName + /// + /// \return The vector of MindSpore Lite MSTensor. + virtual Vector GetOutputsByNodeName(const String &node_name) const = 0; + +#ifndef NOT_USE_STL + /// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name. + /// + /// \return The map of output tensor name and MindSpore Lite MSTensor. + virtual std::unordered_map GetOutputs() const = 0; +#endif + + /// \brief Get name of output tensors of model compiled by this session. + /// + /// \return The vector of string as output tensor names in order. + virtual Vector GetOutputTensorNames() const = 0; + + /// \brief Get output MindSpore Lite MSTensors of model by tensor name. + /// + /// \param[in] tensor_name Define tensor name. + /// + /// \return Pointer of MindSpore Lite MSTensor. + virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const = 0; + + /// \brief Resize inputs shape. + /// + /// \param[in] inputs Define the inputs of the model. + /// \param[in] dims Define the inputs new shape. + /// + /// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h. 
+ virtual int Resize(const Vector &inputs, const Vector> &dims) = 0; +}; +} // namespace session +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_LITE_SESSION_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_types.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_types.h new file mode 100755 index 00000000000..d54975193cf --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_types.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_ +#define MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_ + +namespace mindspore::lite { +/// \brief CpuBindMode defined for holding bind cpu strategy argument. +typedef enum { + NO_BIND, /**< no bind */ + HIGHER_CPU, /**< bind higher cpu first */ + MID_CPU /**< bind middle cpu first */ +} CpuBindMode; + +/// \brief DeviceType defined for holding user's preferred backend. +typedef enum { + DT_CPU, /**< CPU device type */ + DT_GPU, /**< GPU device type */ + DT_NPU /**< NPU device type */ +} DeviceType; + +} // namespace mindspore::lite +#endif // MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_utils.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_utils.h new file mode 100755 index 00000000000..d7ebedbc12e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/lite_utils.h @@ -0,0 +1,412 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
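The LiteSession interface above is the main runtime path. A hedged end-to-end sketch using the pre-compiled CreateSession(model_buf, size, context) overload; the template arguments stripped from this listing are assumed to be vectors of tensor::MSTensor pointers and String:

    // Hedged sketch: create a session from a model buffer, feed one image, read one output.
    #include <cstring>
    #include "include/lite_session.h"
    #include "include/errorcode.h"

    int Infer(const char *model_buf, size_t model_size, const float *image, size_t image_bytes) {
      mindspore::lite::Context context;
      context.thread_num_ = 1;

      mindspore::session::LiteSession *session =
          mindspore::session::LiteSession::CreateSession(model_buf, model_size, &context);
      if (session == nullptr) {
        return mindspore::lite::RET_NULL_PTR;
      }

      // Input tensors are owned by the session; copy the image into the first one.
      auto inputs = session->GetInputs();
      std::memcpy(inputs[0]->MutableData(), image, image_bytes);

      int ret = session->RunGraph();
      if (ret == mindspore::lite::RET_OK) {
        auto out_names = session->GetOutputTensorNames();
        auto *output = session->GetOutputByTensorName(out_names[0]);  // the MNIST net has one output
        const float *scores = static_cast<const float *>(output->MutableData());
        (void)scores;  // argmax over the 10 class scores would go here
      }
      delete session;
      return ret;
    }
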
+ */ + +#ifndef MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ +#define MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ + +#ifndef NOT_USE_STL +#include +#include +#include +#include +#else +#include +#include +#include +#include +#endif // NOT_USE_STL + +#ifndef MS_API +#ifdef _WIN32 +#define MS_API __declspec(dllexport) +#else +#define MS_API __attribute__((visibility("default"))) +#endif +#endif + +namespace mindspore { +namespace schema { +struct Tensor; +} // namespace schema + +namespace tensor { +class MSTensor; +} // namespace tensor + +namespace lite { +struct DeviceContext; +} // namespace lite + +#ifdef NOT_USE_STL +class String { + public: + String(); + String(size_t count, char ch); + String(const char *s, size_t count); + explicit String(const char *s); + String(const String &other); + String(const String &other, size_t pos, size_t count = npos); + + ~String(); + + String &operator=(const String &str); + String &operator=(const char *str); + + char &at(size_t pos); + const char &at(size_t pos) const; + inline char &operator[](size_t pos); + inline const char &operator[](size_t pos) const; + char *data() noexcept; + const char *data() const noexcept; + const char *c_str() const noexcept; + + // capacity + bool empty() const noexcept; + size_t size() const noexcept; + size_t length() const noexcept; + + // operations + void clear() noexcept; + String &append(size_t count, const char ch); + String &append(const String &str); + String &append(const char *s); + String &operator+(const String &str); + String &operator+=(const String &str); + String &operator+=(const char *str); + String &operator+=(const char ch); + int compare(const String &str) const; + int compare(const char *str) const; + + String substr(size_t pos = 0, size_t count = npos) const; + + static const size_t npos = -1; + + private: + size_t size_; + char *buffer_; +}; + +String operator+(const String &str1, const char *str2); +String operator+(const char *str1, const String &str2); + +String to_string(int32_t value); +String to_string(float value); + +#define DEFAULT_CAPACITY 4 +#define MS_C_EXCEPTION(...) exit(1) +#define MIN(x, y) ((x < y) ? (x) : (y)) +template +class Vector { + public: + Vector() { + size_ = 0; + capacity_ = DEFAULT_CAPACITY; + elem_size_ = sizeof(T); + data_ = nullptr; + } + + explicit Vector(size_t size) { + size_ = size; + elem_size_ = sizeof(T); + capacity_ = (size == 0 ? DEFAULT_CAPACITY : size); + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memset(data_, 0, capacity_ * elem_size_); + } + + Vector(size_t size, const T &value) { + size_ = size; + elem_size_ = sizeof(T); + capacity_ = (size == 0 ? 
DEFAULT_CAPACITY : size); + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + for (int i = 0; i < size; ++i) { + data_[i] = value; + } + } + + Vector(const Vector &vec) { + size_ = vec.size_; + elem_size_ = sizeof(T); + capacity_ = vec.capacity_; + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(data_, vec.data_, size_ * elem_size_); + } + + ~Vector() { + if (data_ != nullptr) { + free(data_); + } + } + + void clear() { + size_ = 0; + if (data_ != nullptr) { + free(data_); + data_ = nullptr; + } + } + + void push_back(const T &elem) { + if (data_ == nullptr) { + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } else if (size_ == capacity_) { + resize(size_ + 1); + --size_; + } + data_[size_] = elem; + ++size_; + } + + void push_back(T &&elem) { + if (data_ == nullptr) { + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + } else if (size_ == capacity_) { + resize(size_ + 1); + --size_; + } + data_[size_] = elem; + ++size_; + } + + void pop_back() { + if (size_ > 0) { + --size_; + } else { + MS_C_EXCEPTION("Index is out of range!"); + } + } + + void insert(const T &elem, size_t index) { + if (index <= size_) { + ++size_; + if (size_ > capacity_) { + resize(size_); + } + if (index == size_ - 1) { + push_back(elem); + } else { + memmove(data_ + index + 1, data_ + index, (size_ - index - 1) * elem_size_); + data_[index] = elem; + } + } else { + MS_C_EXCEPTION("Input index is out of range!"); + } + } + + T *begin() { return data_; } + + const T *begin() const { return data_; } + + T *end() { return data_ + size_; } + + const T *end() const { return data_ + size_; } + + T &front() { + if (size_ > 0) { + return data_[0]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + const T &front() const { + if (size_ > 0) { + return data_[0]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + T &back() { + if (size_ > 0) { + return data_[size_ - 1]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + const T &back() const { + if (size_ > 0) { + return data_[size_ - 1]; + } + MS_C_EXCEPTION("Index is out of range!"); + } + + T &at(size_t index) { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + const T &at(size_t index) const { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + T &operator[](size_t index) { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + const T &operator[](size_t index) const { + if (index < size_) { + return data_[index]; + } + MS_C_EXCEPTION("Input index is out of range!"); + } + + T *data() { return data_; } + + const T *data() const { return data_; } + + size_t size() const { return size_; } + + size_t capacity() const { return capacity_; } + + bool empty() const { return size_ == 0; } + + void erase(size_t index) { + if (index == size_ - 1) { + --size_; + } else if (index < size_) { + memmove(data_ + index, data_ + index + 1, (size_ - index - 1) * elem_size_); + --size_; + } else { + MS_C_EXCEPTION("Input index is out of range!"); + } + } + + void resize(size_t size) { + while (size > capacity_) { + capacity_ *= 2; + } + T *tmp = data_; + data_ = 
reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(data_, tmp, MIN(size, size_) * elem_size_); + size_ = size; + free(tmp); + } + + void reserve(size_t capacity) { + if (capacity > capacity_) { + capacity_ = capacity; + } + } + + Vector &operator=(const Vector &vec) { + if (this == &vec) { + return *this; + } + size_ = vec.size_; + elem_size_ = sizeof(T); + capacity_ = vec.capacity_; + data_ = reinterpret_cast(malloc(capacity_ * elem_size_)); + if (data_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(data_, vec.data_, size_ * elem_size_); + return *this; + } + + private: + size_t size_; + size_t elem_size_; + size_t capacity_; + T *data_; +}; +using TensorPtrVector = Vector; +using Uint32Vector = Vector; +using AllocatorPtr = void *; +using DeviceContextVector = Vector; +using KernelCallBack = void (*)(void *, void *); +#else +/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically. +/// +/// \note List public class and interface for reference. +class Allocator; +using AllocatorPtr = std::shared_ptr; + +using TensorPtrVector = std::vector; +using Uint32Vector = std::vector; +template +using Vector = std::vector; + +template +inline std::string to_string(T t) { + return std::to_string(t); +} + +namespace tensor { +using String = std::string; +} // namespace tensor + +namespace session { +using String = std::string; +} // namespace session + +/// \brief CallBackParam defined input arguments for callBack function. +struct CallBackParam { + session::String node_name; /**< node name argument */ + session::String node_type; /**< node type argument */ +}; + +struct GPUCallBackParam : CallBackParam { + double execute_time{-1.f}; +}; + +/// \brief KernelCallBack defined the function pointer for callBack. +using KernelCallBack = std::function inputs, Vector outputs, + const CallBackParam &opInfo)>; + +namespace lite { +using String = std::string; +using DeviceContextVector = std::vector; + +/// \brief Set data of MSTensor from string vector. +/// +/// \param[in] input string vector. +/// \param[out] MSTensor. +/// +/// \return STATUS as an error code of this interface, STATUS is defined in errorcode.h. +int MS_API StringsToMSTensor(const Vector &inputs, tensor::MSTensor *tensor); + +/// \brief Get string vector from MSTensor. +/// \param[in] MSTensor. +/// \return string vector. +Vector MS_API MSTensorToStrings(const tensor::MSTensor *tensor); +} // namespace lite +#endif // NOT_USE_STL +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/model.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/model.h new file mode 100755 index 00000000000..22d2ffa30b9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/model.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
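RunGraph() accepts optional before/after callbacks of the KernelCallBack type defined above, which is handy for per-node profiling on the board. A hedged trace callback for the STL build (the callback's parameter types were stripped in this listing and are assumed to be vectors of tensor::MSTensor pointers plus CallBackParam):

    // Hedged sketch: an 'after' callback that logs every executed node (STL build assumed).
    #include <cstdio>
    #include "include/lite_utils.h"

    static bool TraceNode(mindspore::Vector<mindspore::tensor::MSTensor *> inputs,
                          mindspore::Vector<mindspore::tensor::MSTensor *> outputs,
                          const mindspore::CallBackParam &op_info) {
      std::printf("node %s (%s): %u in, %u out\n", op_info.node_name.c_str(), op_info.node_type.c_str(),
                  static_cast<unsigned>(inputs.size()), static_cast<unsigned>(outputs.size()));
      return true;  // returning true lets RunGraph continue with the next node
    }
    // usage:  session->RunGraph(nullptr, TraceNode);
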
+ */ +#ifndef MINDSPORE_LITE_INCLUDE_MODEL_H_ +#define MINDSPORE_LITE_INCLUDE_MODEL_H_ + +#include "include/lite_utils.h" + +namespace mindspore::lite { +struct MS_API Model { + struct Node { + String name_; + int node_type_; + const void *primitive_; + Uint32Vector input_indices_; + Uint32Vector output_indices_; + int quant_type_; + }; + using NodePtrVector = Vector; + struct SubGraph { + String name_; + Uint32Vector input_indices_; + Uint32Vector output_indices_; + Uint32Vector node_indices_; + Uint32Vector tensor_indices_; + }; + using SubGraphPtrVector = Vector; + String name_; + String version_; + TensorPtrVector all_tensors_; + NodePtrVector all_nodes_; + char *buf; + SubGraphPtrVector sub_graphs_; + + /// \brief Static method to create a Model pointer. + /// + /// \param[in] model_buf Define the buffer read from a model file. + /// \param[in] size Define bytes number of model buffer. + /// + /// \return Pointer of MindSpore Lite Model. + static Model *Import(const char *model_buf, size_t size); + + /// \brief Free meta graph temporary buffer + virtual void Free() = 0; + + /// \brief Free all temporary buffer.EG: nodes in the model. + virtual void Destroy() = 0; + + /// \brief Model destruct, free all memory + virtual ~Model() = default; +}; +} // namespace mindspore::lite + +#endif // MINDSPORE_LITE_INCLUDE_MODEL_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ms_tensor.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ms_tensor.h new file mode 100755 index 00000000000..1a6f3112b8a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/ms_tensor.h @@ -0,0 +1,97 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ +#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ + +#include "include/lite_utils.h" +#include "ir/dtype/type_id.h" + +namespace mindspore { +namespace tensor { +/// \brief MSTensor defined tensor in MindSpore Lite. +class MS_API MSTensor { + public: + /// \brief Constructor of MindSpore Lite MSTensor. + /// + /// \return Instance of MindSpore Lite MSTensor. + MSTensor() = default; + + /// \brief Destructor of MindSpore Lite Model. + virtual ~MSTensor() = default; + + /// \brief Create a MSTensor. + /// + /// \return Pointer to an instance of MindSpore Lite MSTensor. + static MSTensor *CreateTensor(const String &name, TypeId type, const Vector &shape, const void *data, + size_t data_len); + + /// \brief Get data type of the MindSpore Lite MSTensor. + /// + /// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are + /// suitable for MSTensor. + /// + /// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor. + virtual TypeId data_type() const = 0; + + /// \brief Get shape of the MindSpore Lite MSTensor. + /// + /// \return A vector of int as the shape of the MindSpore Lite MSTensor. 
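Model::Import above parses the flatbuffer and keeps a temporary meta-graph buffer; on a part with limited SRAM it is worth releasing that buffer with Free() once CompileGraph has run. A hedged sketch of the two-step path (CreateSession(context) plus CompileGraph), as opposed to the pre-compiled overload sketched earlier:

    // Hedged sketch: lifecycle of the lite::Model declared above.
    #include "include/model.h"
    #include "include/lite_session.h"
    #include "include/errorcode.h"

    int LoadAndCompile(mindspore::session::LiteSession *session, const char *model_buf, size_t model_size) {
      mindspore::lite::Model *model = mindspore::lite::Model::Import(model_buf, model_size);
      if (model == nullptr) {
        return mindspore::lite::RET_NULL_PTR;
      }
      int ret = session->CompileGraph(model);
      if (ret == mindspore::lite::RET_OK) {
        model->Free();  // the temporary meta-graph buffer is no longer needed once compiled
      }
      // Keep 'model' alive while the session is in use; delete it at teardown.
      return ret;
    }
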
+ virtual Vector shape() const = 0; + + /// \brief Set the shape of MSTensor. + virtual void set_shape(const Vector &name) = 0; + + /// \brief Get number of element in MSTensor. + /// + /// \return Number of element in MSTensor. + virtual int ElementsNum() const = 0; + + /// \brief Get byte size of data in MSTensor. + /// + /// \return Byte size of data in MSTensor. + virtual size_t Size() const = 0; + + /// \brief Get the name of MSTensor. + /// + /// \return the name of MSTensor. + virtual String tensor_name() const = 0; + + /// \brief Set the name of MSTensor. + virtual void set_tensor_name(const String name) = 0; + + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. The memory buffer will be + /// automatically allocated. + /// + /// \return the pointer points to data in MSTensor. + virtual void *MutableData() = 0; + + /// \brief Get the pointer of data in MSTensor. + /// + /// \note The data pointer can be used to both write and read data in MSTensor. No memory buffer will be + /// allocated. + /// + /// \return the pointer points to data in MSTensor. + virtual void *data() = 0; + + /// \brief Set the data of MSTensor. + virtual void set_data(void *data) = 0; +}; +} // namespace tensor +} // namespace mindspore +#endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/version.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/version.h new file mode 100755 index 00000000000..fd995b27bb6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/include/version.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_INCLUDE_VERSION_H_ +#define MINDSPORE_LITE_INCLUDE_VERSION_H_ + +#include "include/lite_utils.h" + +namespace mindspore { +namespace lite { +const int ms_version_major = 1; +const int ms_version_minor = 2; +const int ms_version_revision = 0; + +/// \brief Global method to get a version string. +/// +/// \return The version string of MindSpore Lite. +inline String Version() { + return "MindSpore Lite " + to_string(ms_version_major) + "." + to_string(ms_version_revision) + "." 
+ + to_string(ms_version_revision); +} +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_INCLUDE_VERSION_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cachel1_armv7.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cachel1_armv7.h new file mode 100755 index 00000000000..d2c3e2291fd --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cachel1_armv7.h @@ -0,0 +1,411 @@ +/****************************************************************************** + * @file cachel1_armv7.h + * @brief CMSIS Level 1 Cache API for Armv7-M and later + * @version V1.0.0 + * @date 03. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_CACHEL1_ARMV7_H +#define ARM_CACHEL1_ARMV7_H + +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. + @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#ifndef __SCB_DCACHE_LINE_SIZE +#define __SCB_DCACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ +#endif + +#ifndef __SCB_ICACHE_LINE_SIZE +#define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). 
See also register SCB_CCSIDR */ +#endif + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & 
SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. 
+ \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ + +#endif /* ARM_CACHEL1_ARMV7_H */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armcc.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armcc.h new file mode 100755 index 00000000000..237ff6ec3ea --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armcc.h @@ -0,0 +1,885 @@ +/**************************************************************************//** + * @file cmsis_armcc.h + * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file + * @version V5.2.1 + * @date 26. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. 
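On the Cortex-M7 in the STM32F746, enabling the caches above once at boot is generally worthwhile for inference speed, and the D-cache must be maintained around any buffer a DMA peripheral touches. A hedged sketch using the functions defined above; the buffer name and size are illustrative, and the device CMSIS header (core_cm7.h via the vendor header) is assumed to be included:

    /* Hedged sketch: cache enable at boot plus D-cache maintenance around a DMA buffer
     * (illustrative names; assumes the device CMSIS header is already included). */
    #define RX_BUF_WORDS 256U
    static uint32_t rx_buf[RX_BUF_WORDS] __attribute__((aligned(32)));  /* align to the 32-byte cache line */

    void CacheInit(void) {
      SCB_EnableICache();
      SCB_EnableDCache();
    }

    void BeforeDmaWritesRxBuf(void) {
      /* The CPU will later read data written by the peripheral: discard stale lines first. */
      SCB_InvalidateDCache_by_Addr(rx_buf, sizeof(rx_buf));
    }

    void BeforeDmaReadsRxBuf(void) {
      /* The peripheral will read data the CPU wrote: push dirty lines to memory first. */
      SCB_CleanDCache_by_Addr(rx_buf, sizeof(rx_buf));
    }
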
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_ARMCC_H +#define __CMSIS_ARMCC_H + + +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677) + #error "Please use Arm Compiler Toolchain V4.0.677 or later!" +#endif + +/* CMSIS compiler control architecture macros */ +#if ((defined (__TARGET_ARCH_6_M ) && (__TARGET_ARCH_6_M == 1)) || \ + (defined (__TARGET_ARCH_6S_M ) && (__TARGET_ARCH_6S_M == 1)) ) + #define __ARM_ARCH_6M__ 1 +#endif + +#if (defined (__TARGET_ARCH_7_M ) && (__TARGET_ARCH_7_M == 1)) + #define __ARM_ARCH_7M__ 1 +#endif + +#if (defined (__TARGET_ARCH_7E_M) && (__TARGET_ARCH_7E_M == 1)) + #define __ARM_ARCH_7EM__ 1 +#endif + + /* __ARM_ARCH_8M_BASE__ not applicable */ + /* __ARM_ARCH_8M_MAIN__ not applicable */ + /* __ARM_ARCH_8_1M_MAIN__ not applicable */ + +/* CMSIS compiler control DSP macros */ +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __ARM_FEATURE_DSP 1 +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE static __forceinline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __declspec(noreturn) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed)) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT __packed struct +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION __packed union +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #define __UNALIGNED_UINT32(x) (*((__packed uint32_t *)(x))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr))) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr))) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __memory_changed() +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, 
section("RESET"))) +#endif + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); */ + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_INLINE uint32_t __get_CONTROL(void) +{ + register uint32_t __regControl __ASM("control"); + return(__regControl); +} + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_INLINE void __set_CONTROL(uint32_t control) +{ + register uint32_t __regControl __ASM("control"); + __regControl = control; +} + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_INLINE uint32_t __get_IPSR(void) +{ + register uint32_t __regIPSR __ASM("ipsr"); + return(__regIPSR); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_INLINE uint32_t __get_APSR(void) +{ + register uint32_t __regAPSR __ASM("apsr"); + return(__regAPSR); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_INLINE uint32_t __get_xPSR(void) +{ + register uint32_t __regXPSR __ASM("xpsr"); + return(__regXPSR); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_INLINE uint32_t __get_PSP(void) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + return(__regProcessStackPointer); +} + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + __regProcessStackPointer = topOfProcStack; +} + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_INLINE uint32_t __get_MSP(void) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + return(__regMainStackPointer); +} + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + __regMainStackPointer = topOfMainStack; +} + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. 
+ \return Priority Mask value + */ +__STATIC_INLINE uint32_t __get_PRIMASK(void) +{ + register uint32_t __regPriMask __ASM("primask"); + return(__regPriMask); +} + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) +{ + register uint32_t __regPriMask __ASM("primask"); + __regPriMask = (priMask); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_INLINE uint32_t __get_BASEPRI(void) +{ + register uint32_t __regBasePri __ASM("basepri"); + return(__regBasePri); +} + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) +{ + register uint32_t __regBasePri __ASM("basepri"); + __regBasePri = (basePri & 0xFFU); +} + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + register uint32_t __regBasePriMax __ASM("basepri_max"); + __regBasePriMax = (basePri & 0xFFU); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_INLINE uint32_t __get_FAULTMASK(void) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + return(__regFaultMask); +} + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + __regFaultMask = (faultMask & (uint32_t)1U); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_INLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __nop + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV __rev + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) +{ + rev16 r0, r0 + bx lr +} +#endif + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value) +{ + revsh r0, r0 + bx lr +} +#endif + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR __ror + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. 
+ \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __breakpoint(value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __RBIT __rbit +#else +__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ + return result; +} +#endif + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ __clz + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) +#else + #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) +#else + #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) +#else + #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXB(value, ptr) __strex(value, ptr) +#else + #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 +#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \ + ((int64_t)(ARG3) << 32U) ) >> 32U)) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCC_H */ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang.h new file mode 100755 index 00000000000..90de9dbf8f8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang.h @@ -0,0 +1,1467 @@ +/**************************************************************************//** + * @file cmsis_armclang.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.3.1 + * @date 26. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __ARM_COMPAT_H +#include /* Compatibility header for Arm Compiler 5 intrinsics */ +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang 
diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); see arm_compat.h */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); see arm_compat.h */ + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. 
+ \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). 
+ \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. 
+ */ +#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
+ \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. 
+ + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. 
+ */ +#define __DSB() __builtin_arm_dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. 
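The load-acquire/store-release intrinsics above (available from Armv8-M onwards) give one-directional ordering without a separate barrier. A sketch of a typical handoff between an interrupt handler and the main loop follows; data_ready, sample and the function names are hypothetical.

static volatile uint32_t sample;
static volatile uint32_t data_ready;

void producer_irq_handler(void)            /* hypothetical IRQ handler        */
{
  sample = 42U;                            /* write the payload first         */
  __STL(1U, &data_ready);                  /* store-release publishes it      */
}

uint32_t consumer_poll(void)
{
  if (__LDA(&data_ready) != 0U)            /* load-acquire: payload is visible */
  {
    return sample;
  }
  return 0U;
}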
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +#define __SADD8 __builtin_arm_sadd8 +#define __QADD8 __builtin_arm_qadd8 +#define __SHADD8 __builtin_arm_shadd8 +#define __UADD8 __builtin_arm_uadd8 +#define __UQADD8 __builtin_arm_uqadd8 +#define __UHADD8 __builtin_arm_uhadd8 +#define __SSUB8 __builtin_arm_ssub8 +#define __QSUB8 __builtin_arm_qsub8 +#define __SHSUB8 __builtin_arm_shsub8 +#define __USUB8 __builtin_arm_usub8 +#define __UQSUB8 __builtin_arm_uqsub8 +#define __UHSUB8 __builtin_arm_uhsub8 +#define __SADD16 __builtin_arm_sadd16 +#define __QADD16 __builtin_arm_qadd16 +#define __SHADD16 __builtin_arm_shadd16 +#define __UADD16 __builtin_arm_uadd16 +#define __UQADD16 __builtin_arm_uqadd16 +#define __UHADD16 __builtin_arm_uhadd16 +#define __SSUB16 __builtin_arm_ssub16 +#define __QSUB16 __builtin_arm_qsub16 +#define __SHSUB16 __builtin_arm_shsub16 +#define __USUB16 __builtin_arm_usub16 +#define __UQSUB16 __builtin_arm_uqsub16 +#define __UHSUB16 __builtin_arm_uhsub16 +#define __SASX __builtin_arm_sasx +#define __QASX __builtin_arm_qasx +#define __SHASX __builtin_arm_shasx +#define __UASX __builtin_arm_uasx +#define __UQASX __builtin_arm_uqasx +#define __UHASX __builtin_arm_uhasx +#define __SSAX __builtin_arm_ssax +#define __QSAX __builtin_arm_qsax +#define __SHSAX __builtin_arm_shsax +#define __USAX __builtin_arm_usax +#define __UQSAX __builtin_arm_uqsax +#define __UHSAX __builtin_arm_uhsax +#define __USAD8 __builtin_arm_usad8 +#define __USADA8 __builtin_arm_usada8 +#define __SSAT16 __builtin_arm_ssat16 +#define __USAT16 __builtin_arm_usat16 +#define __UXTB16 __builtin_arm_uxtb16 +#define __UXTAB16 __builtin_arm_uxtab16 +#define __SXTB16 __builtin_arm_sxtb16 +#define __SXTAB16 __builtin_arm_sxtab16 +#define __SMUAD 
__builtin_arm_smuad +#define __SMUADX __builtin_arm_smuadx +#define __SMLAD __builtin_arm_smlad +#define __SMLADX __builtin_arm_smladx +#define __SMLALD __builtin_arm_smlald +#define __SMLALDX __builtin_arm_smlaldx +#define __SMUSD __builtin_arm_smusd +#define __SMUSDX __builtin_arm_smusdx +#define __SMLSD __builtin_arm_smlsd +#define __SMLSDX __builtin_arm_smlsdx +#define __SMLSLD __builtin_arm_smlsld +#define __SMLSLDX __builtin_arm_smlsldx +#define __SEL __builtin_arm_sel +#define __QADD __builtin_arm_qadd +#define __QSUB __builtin_arm_qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang_ltm.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang_ltm.h new file mode 100755 index 00000000000..0e5c7349d3e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_armclang_ltm.h @@ -0,0 +1,1893 @@ +/**************************************************************************//** + * @file cmsis_armclang_ltm.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V1.3.0 + * @date 26. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
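To make the packed-SIMD macros just above concrete, here is a small sketch (not from the header) that packs two int16_t values per operand with __PKHBT and forms a two-element dot product with __SMUAD. It assumes the DSP extension (__ARM_FEATURE_DSP) is present; the variable names are made up.

int16_t a0 = 3, a1 = -5, b0 = 7, b1 = 2;
uint32_t a = __PKHBT((uint32_t)(uint16_t)a0, (uint32_t)(uint16_t)a1, 16);  /* high:low = a1:a0 */
uint32_t b = __PKHBT((uint32_t)(uint16_t)b0, (uint32_t)(uint16_t)b1, 16);  /* high:low = b1:b0 */
int32_t acc = (int32_t)__SMUAD(a, b);      /* a0*b0 + a1*b1 = 21 - 10 = 11 */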
+ */ + +/*lint -esym(9058, IRQn)*/ /* disable MISRA 2012 Rule 2.4 for IRQn */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __ARM_COMPAT_H +#include /* Compatibility header for Arm Compiler 5 intrinsics */ +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32 */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" +/*lint -esym(9058, T_UINT32_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_READ */ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START +#define __PROGRAM_START __main +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit +#endif + +#ifndef 
__STACK_LIMIT +#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); see arm_compat.h */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); see arm_compat.h */ + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). 
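A one-line use of the IPSR accessor above that comes up constantly in driver code (a sketch; the helper name is hypothetical): IPSR holds the active exception number, so a non-zero value means the code is running inside a handler.

static inline int in_handler_mode(void)
{
  return (__get_IPSR() != 0U);   /* 0 = thread mode, otherwise exception number */
}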
+ \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. 
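A sketch of how the PSP accessors above are typically used at RTOS start-up: point PSP at the top of a thread stack, then flip CONTROL.SPSEL (bit 1) so thread mode runs on PSP. thread_stack and its size are hypothetical; the __ISB() defined later in this header flushes the pipeline after the CONTROL write, as the architecture recommends.

static uint64_t thread_stack[128];                /* 1 KiB, 8-byte aligned   */

static void switch_thread_mode_to_psp(void)
{
  __set_PSP((uint32_t)&thread_stack[128]);        /* PSP = top of the stack  */
  __set_CONTROL(__get_CONTROL() | (1UL << 1));    /* SPSEL = 1: use PSP      */
  __ISB();
}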
+ \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. 
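The PRIMASK pair above is the usual basis for a nesting-safe critical section (sketch only; the variable name is hypothetical): save the current mask, disable IRQs via the __disable_irq() compatibility intrinsic referenced earlier, and restore the saved value so interrupts are not re-enabled if they were already off.

uint32_t primask = __get_PRIMASK();   /* remember the current state        */
__disable_irq();                      /* PRIMASK = 1: mask all IRQs        */
/* ... touch shared data ... */
__set_PRIMASK(primask);               /* restore whatever was there before */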
+ \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
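BASEPRI allows a softer critical section than PRIMASK: only interrupts at or below a chosen priority are masked, so higher-urgency IRQs keep running. The sketch below assumes 4 implemented priority bits (__NVIC_PRIO_BITS == 4, as on STM32F7 parts), so the priority value sits in the upper nibble of the register.

uint32_t saved = __get_BASEPRI();
__set_BASEPRI_MAX(5UL << (8U - 4U));  /* mask priority 5 and lower-urgency IRQs */
/* ... region that must not be preempted by those IRQs ... */
__set_BASEPRI(saved);                 /* restore the previous masking level     */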
+ \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP __builtin_arm_nop + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. 
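On Armv8-M parts the stack-limit accessors above pair naturally with the stack-pointer setters: program the limit at the lowest address of the stack so an overflow faults instead of silently corrupting memory. A sketch, reusing the hypothetical thread_stack array from the PSP example earlier:

__set_PSPLIM((uint32_t)&thread_stack[0]);     /* fault if PSP drops below this */
__set_PSP((uint32_t)&thread_stack[128]);      /* then place PSP at the top     */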
+ */ +#define __WFI __builtin_arm_wfi + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE __builtin_arm_wfe + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV __builtin_arm_sev + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __builtin_arm_dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __builtin_bswap32(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __ROR(__REV(value), 16) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) (int16_t)__builtin_bswap16(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. 
+ This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __builtin_arm_ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __builtin_arm_usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. 
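Two quick uses of the bit-manipulation intrinsics defined just above (a sketch with made-up values): __CLZ gives a branch-free highest-set-bit for a non-zero mask, and __REV swaps the endianness of a whole word.

uint32_t pending = 0x00000810U;            /* bits 4 and 11 set              */
uint32_t highest = 31U - __CLZ(pending);   /* == 11 (only valid if non-zero) */
uint32_t swapped = __REV(0x12345678U);     /* == 0x78563412                  */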
+ The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + 
__ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqasx %0, %1, %2" : "=r" 
(result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1,ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +#define __USAT16(ARG1,ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, 
uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), 
"r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_compiler.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_compiler.h new file mode 100755 index 00000000000..adbf296f15a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_compiler.h @@ -0,0 +1,283 @@ +/**************************************************************************//** + * @file cmsis_compiler.h + * @brief CMSIS compiler generic header file + * @version V5.1.0 + * @date 09. October 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_COMPILER_H +#define __CMSIS_COMPILER_H + +#include + +/* + * Arm Compiler 4/5 + */ +#if defined ( __CC_ARM ) + #include "cmsis_armcc.h" + + +/* + * Arm Compiler 6.6 LTM (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100) + #include "cmsis_armclang_ltm.h" + + /* + * Arm Compiler above 6.10.1 (armclang) + */ +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) + #include "cmsis_armclang.h" + + +/* + * GNU Compiler + */ +#elif defined ( __GNUC__ ) + #include "cmsis_gcc.h" + + +/* + * IAR Compiler + */ +#elif defined ( __ICCARM__ ) + #include + + +/* + * TI Arm Compiler + */ +#elif defined ( __TI_ARM__ ) + #include + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __attribute__((packed)) + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed)) + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed)) + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) + #endif + #ifndef __RESTRICT + #define __RESTRICT __restrict + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif + + +/* + * TASKING Compiler + */ +#elif defined ( __TASKING__ ) + /* + * The CMSIS functions have been implemented as intrinsics in the compiler. + * Please use "carm -?i" to get an up to date list of all intrinsics, + * Including the CMSIS ones. 
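Regardless of which compiler branch above is selected, application code is meant to use the portable macros rather than toolchain-specific attributes. A short illustrative sketch (names such as wire_header and board_on_error are invented for the example, not part of the patch):

#include <stdint.h>
#include "cmsis_compiler.h"

/* Packed wire-format header: no padding, whichever toolchain builds it. */
__PACKED_STRUCT wire_header {
  uint8_t  type;
  uint16_t length;                      /* sits at an odd offset on purpose */
  uint32_t crc;
};

/* Weak default handler that a board support file may override. */
__WEAK void board_on_error(uint32_t code) { (void)code; }

/* Read a possibly unaligned little-endian word from a byte stream. */
static inline uint32_t read_word(const uint8_t *p) {
  return __UNALIGNED_UINT32_READ(p);
}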
+ */ + + #ifndef __ASM + #define __ASM __asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + #define __NO_RETURN __attribute__((noreturn)) + #endif + #ifndef __USED + #define __USED __attribute__((used)) + #endif + #ifndef __WEAK + #define __WEAK __attribute__((weak)) + #endif + #ifndef __PACKED + #define __PACKED __packed__ + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __packed__ + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION union __packed__ + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + struct __packed__ T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #define __ALIGNED(x) __align(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif + + +/* + * COSMIC Compiler + */ +#elif defined ( __CSMC__ ) + #include + + #ifndef __ASM + #define __ASM _asm + #endif + #ifndef __INLINE + #define __INLINE inline + #endif + #ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline + #endif + #ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __STATIC_INLINE + #endif + #ifndef __NO_RETURN + // NO RETURN is automatically detected hence no warning here + #define __NO_RETURN + #endif + #ifndef __USED + #warning No compiler specific solution for __USED. __USED is ignored. 
+ #define __USED + #endif + #ifndef __WEAK + #define __WEAK __weak + #endif + #ifndef __PACKED + #define __PACKED @packed + #endif + #ifndef __PACKED_STRUCT + #define __PACKED_STRUCT @packed struct + #endif + #ifndef __PACKED_UNION + #define __PACKED_UNION @packed union + #endif + #ifndef __UNALIGNED_UINT32 /* deprecated */ + @packed struct T_UINT32 { uint32_t v; }; + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) + #endif + #ifndef __UNALIGNED_UINT16_WRITE + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT16_READ + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) + #endif + #ifndef __UNALIGNED_UINT32_WRITE + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) + #endif + #ifndef __UNALIGNED_UINT32_READ + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) + #endif + #ifndef __ALIGNED + #warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored. + #define __ALIGNED(x) + #endif + #ifndef __RESTRICT + #warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. + #define __RESTRICT + #endif + #ifndef __COMPILER_BARRIER + #warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored. + #define __COMPILER_BARRIER() (void)0 + #endif + + +#else + #error Unknown compiler. +#endif + + +#endif /* __CMSIS_COMPILER_H */ + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_gcc.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_gcc.h new file mode 100755 index 00000000000..a2778f58e8f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_gcc.h @@ -0,0 +1,2177 @@ +/**************************************************************************//** + * @file cmsis_gcc.h + * @brief CMSIS compiler GCC header file + * @version V5.3.0 + * @date 26. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_GCC_H +#define __CMSIS_GCC_H + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + struct __attribute__((packed)) T_UINT32 { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +/* ######################### Startup and Lowlevel Init ######################## */ + +#ifndef __PROGRAM_START + +/** + \brief Initializes data and bss sections + \details This default implementations initialized all data and additional bss + sections relying on .copy.table and .zero.table specified properly + in the used linker script. 
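Two of the GCC mappings above are commonly misread: __ALIGNED controls the placement alignment of an object, and __COMPILER_BARRIER only stops the compiler from reordering across that point, it is not a hardware barrier (that is what __DMB/__DSB further below are for). Minimal sketch with invented names; the 32-byte alignment is only an assumption for the example:

#include <stdint.h>
/* Assumes the target's CMSIS device header is included first, which pulls in
 * the macros defined in the file above. */

static uint8_t dma_buf[64] __ALIGNED(32);   /* e.g. cache-line sized alignment (assumed) */
static volatile uint32_t frame_ready;

void publish_frame(void) {
  dma_buf[0] = 0xA5U;
  __COMPILER_BARRIER();   /* keep the data write before the flag, compiler-wise only */
  frame_ready = 1U;
}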
+ + */ +__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) +{ + extern void _start(void) __NO_RETURN; + + typedef struct { + uint32_t const* src; + uint32_t* dest; + uint32_t wlen; + } __copy_table_t; + + typedef struct { + uint32_t* dest; + uint32_t wlen; + } __zero_table_t; + + extern const __copy_table_t __copy_table_start__; + extern const __copy_table_t __copy_table_end__; + extern const __zero_table_t __zero_table_start__; + extern const __zero_table_t __zero_table_end__; + + for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = pTable->src[i]; + } + } + + for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = 0u; + } + } + + _start(); +} + +#define __PROGRAM_START __cmsis_start +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP __StackTop +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT __StackLimit +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __Vectors +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) +#endif + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); +} +#endif + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. 
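The __disable_irq()/__enable_irq() pair above is the simplest guard for a short critical section entered from privileged thread mode. Illustrative sketch (tick_count is an invented name); note that this form is not nestable, a PRIMASK save/restore variant is sketched a bit further below:

#include <stdint.h>

static volatile uint32_t tick_count;    /* also written from an ISR */

uint32_t get_and_clear_ticks(void) {
  uint32_t t;
  __disable_irq();                      /* PRIMASK = 1: mask configurable IRQs */
  t = tick_count;
  tick_count = 0U;
  __enable_irq();                       /* PRIMASK = 0 */
  return t;
}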
+ \return IPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); +} +#endif + + +/** + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. 
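A common use of __get_IPSR() above: it reads 0 in thread mode and the active exception number inside a handler, so shared code can choose an ISR-safe path. Sketch with invented names:

#include <stdbool.h>
#include <stdint.h>

static inline bool in_interrupt(void) {
  return __get_IPSR() != 0U;            /* non-zero => handler mode */
}

void log_event(uint32_t code) {
  (void)code;
  if (in_interrupt()) {
    /* defer: push to a queue drained from the main loop */
  } else {
    /* blocking work is acceptable here */
  }
}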
+ \param [in] topOfMainStack Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} + + +/** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif + + +/** + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + + +/** + \brief Get Base Priority + \details Returns the current value of the Base Priority register. 
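With __get_PRIMASK()/__set_PRIMASK() the critical section becomes nestable: remember the caller's mask state and restore it instead of unconditionally re-enabling interrupts. Sketch (shared_state is an invented name):

#include <stdint.h>

static volatile uint32_t shared_state;

void update_shared(uint32_t v) {
  uint32_t primask = __get_PRIMASK();   /* remember the caller's mask state */
  __disable_irq();
  shared_state = v;                     /* the protected update */
  __set_PRIMASK(primask);               /* restore rather than force-enable */
}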
+ \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +{ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif + + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); +} + + +/** + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); +} +#endif + + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
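__set_BASEPRI()/__set_BASEPRI_MAX() above mask only exceptions whose priority value is numerically greater than or equal to BASEPRI, so more urgent interrupts keep running. The value has to be written in the same left-shifted form the NVIC uses; the 4 implemented priority bits below are an assumption for the example:

#include <stdint.h>

#define EXAMPLE_PRIO_BITS  4U           /* assumption: 4 implemented priority bits */

void block_low_priority_irqs(void) {
  /* Mask priority values 5 and higher; 0..4 stay enabled. */
  __set_BASEPRI(5U << (8U - EXAMPLE_PRIO_BITS));
}

void unblock_low_priority_irqs(void) {
  __set_BASEPRI(0U);                    /* 0 disables BASEPRI masking */
}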
+ \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); +} +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + +/** + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif +} + +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif +} +#endif + + +/** + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif +} +#endif + + +/** + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} + + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
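The stack-limit registers above exist only on ARMv8-M; the guards in this file compile them out elsewhere, and the Cortex-M7 targeted by this example does not have them. Where present, an RTOS would typically program PSPLIM on every context switch so a task stack overflow faults immediately. Hedged sketch with an invented task structure:

#include <stdint.h>

typedef struct {                        /* hypothetical task control block */
  uint32_t *stack_base;                 /* lowest address of the task stack */
  uint32_t *stack_top;                  /* initial PSP (stack grows downward) */
} task_t;

void start_task_stack(const task_t *t) {
  __set_PSPLIM((uint32_t)t->stack_base);  /* fault if PSP ever drops below this */
  __set_PSP((uint32_t)t->stack_top);
}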
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_get_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + return __builtin_arm_get_fpscr(); +#else + uint32_t result; + + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + return(result); +#endif +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __ASM volatile ("nop") + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __ASM volatile ("wfi":::"memory") + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __ASM volatile ("wfe":::"memory") + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. 
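__WFI()/__WFE() above are the usual way to idle the core between interrupts; since the hint may return immediately, it belongs inside a loop that re-checks the wake condition. Sketch (data_ready is an invented flag set from an ISR):

#include <stdbool.h>

static volatile bool data_ready;

void wait_for_data(void) {
  while (!data_ready) {
    __WFI();                            /* sleep until any enabled interrupt fires */
  }
  data_ready = false;
}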
+ */ +#define __SEV() __ASM volatile ("sev") + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +__STATIC_FORCEINLINE void __ISB(void) +{ + __ASM volatile ("isb 0xF":::"memory"); +} + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +__STATIC_FORCEINLINE void __DSB(void) +{ + __ASM volatile ("dsb 0xF":::"memory"); +} + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +__STATIC_FORCEINLINE void __DMB(void) +{ + __ASM volatile ("dmb 0xF":::"memory"); +} + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else + uint32_t result; + + __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +{ + uint32_t result; + + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +{ +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; + + __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif +} + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
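__REV/__REV16 above make endianness conversion a single instruction, for example turning a little-endian Cortex-M word into big-endian network order. Sketch:

#include <stdint.h>

static inline uint32_t to_be32(uint32_t host) {
  return __REV(host);                   /* 0x12345678 -> 0x78563412 */
}

static inline uint16_t to_be16(uint16_t host) {
  return (uint16_t)__REV16(host);       /* swaps the bytes of the low halfword */
}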
+ \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; +} + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. 
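__CLZ above gives the classic highest-set-bit / integer log2 in one instruction; zero is special-cased because the helper is defined to return 32 for it. Sketch:

#include <stdint.h>

/* floor(log2(x)); this sketch returns 0 for x == 0. */
static inline uint32_t ilog2(uint32_t x) {
  return (x == 0U) ? 0U : (31U - __CLZ(x));
}

/* Smallest power of two >= x (assumes x <= 0x80000000). */
static inline uint32_t round_up_pow2(uint32_t x) {
  return (x <= 1U) ? 1U : ((uint32_t)1U << (32U - __CLZ(x - 1U)));
}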
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); +} + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); +} + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); +} + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +__STATIC_FORCEINLINE void __CLREX(void) +{ + __ASM volatile ("clrex" ::: "memory"); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(ARG1, ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(ARG1, ARG2) \ + __extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
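The exclusive-access pair above is the standard lock-free read-modify-write pattern on Cortex-M: retry the store until no other access has broken the exclusive reservation. Sketch of an atomic increment (event_count is an invented counter):

#include <stdint.h>

static volatile uint32_t event_count;

void atomic_increment(volatile uint32_t *p) {
  uint32_t v;
  do {
    v = __LDREXW(p) + 1U;               /* load with exclusive reservation */
  } while (__STREXW(v, p) != 0U);       /* 0 = stored, 1 = lost the reservation */
  __DMB();                              /* optional: order against later accesses */
}

/* e.g. from main code or an ISR: atomic_increment(&event_count); */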
+ \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return(result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. 
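__SSAT/__USAT above are exactly the clamping step an int8 network needs when a 32-bit accumulator is requantized back to 8 bits, which is why CMSIS-NN relies on them. Hedged sketch; the plain right shift is a simplification of the rounding the generated MNIST kernels actually perform:

#include <stdint.h>

/* Requantize a 32-bit accumulator to a signed 8-bit activation. */
static inline int8_t requantize_q7(int32_t acc, uint32_t right_shift) {
  return (int8_t)__SSAT(acc >> right_shift, 8);    /* clamp to [-128, 127] */
}

/* Unsigned variant, e.g. for an output constrained to [0, 255]. */
static inline uint8_t requantize_u8(int32_t acc, uint32_t right_shift) {
  return (uint8_t)__USAT(acc >> right_shift, 8);   /* clamp to [0, 255] */
}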
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. 
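__STL/__LDA above provide release/acquire ordering on ARMv8-M without explicit barriers (they are compiled out for the ARMv7-M Cortex-M7 this example targets, where a plain access plus __DMB is the usual equivalent). Hedged producer/consumer sketch for a v8-M target, with invented names:

#include <stdint.h>

static uint32_t          payload;
static volatile uint32_t ready;

void producer(uint32_t value) {
  payload = value;
  __STL(1U, &ready);                    /* store-release: payload is visible first */
}

uint32_t consumer(void) {
  while (__LDA(&ready) == 0U) {         /* load-acquire: later reads stay later */
    /* spin, or __WFE() */
  }
  return payload;
}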
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); +} + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); +} + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + +__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + + +__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t 
op2) +{ + uint32_t result; + + __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAX(uint32_t 
op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#define __SSAT16(ARG1, ARG2) \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + +#define __USAT16(ARG1, ARG2) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) + +__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) +{ + uint32_t result; + + __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) +{ + uint32_t result; + + __ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); + + return result; +} + +__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} 
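+
+/* Usage sketch for the packed DSP intrinsics above (illustrative only, not part
+   of the CMSIS API): a q15 dot product that feeds two int16_t lanes into each
+   __SMLAD call. It assumes a DSP-capable target (e.g. Cortex-M4/M7 such as the
+   STM32F746) and reuses __UNALIGNED_UINT32_READ defined earlier in this header;
+   the helper name and its parameters are made up for the example. */
+__STATIC_FORCEINLINE int32_t dot_q15_pairs_example(const int16_t *a, const int16_t *b, uint32_t pairs)
+{
+  int32_t acc = 0;
+  while (pairs-- > 0U)
+  {
+    /* Each 32-bit read packs two consecutive q15 samples (low and high halfword). */
+    uint32_t pa = __UNALIGNED_UINT32_READ(a);
+    uint32_t pb = __UNALIGNED_UINT32_READ(b);
+    /* smlad computes (a.lo * b.lo) + (a.hi * b.hi) + acc in a single instruction,
+       which is the usual reason to prefer these intrinsics over scalar C loops. */
+    acc = (int32_t)__SMLAD(pa, pb, (uint32_t)acc);
+    a += 2;
+    b += 2;
+  }
+  return acc;
+}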
+ +__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) +{ + uint32_t result; + + __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) +{ + union llreg_u{ + uint32_t w32[2]; + uint64_t w64; + } llr; + llr.w64 = acc; + +#ifndef __ARMEB__ /* Little endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); +#else /* Big endian */ + __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); +#endif + + return(llr.w64); +} + +__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2) +{ + uint32_t result; + + __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) +{ + int32_t result; + + __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); + return(result); +} + +#if 0 +#define __PKHBT(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" 
(__ARG2), "I" (ARG3) ); \ + __RES; \ + }) +#endif + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return(result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/*@} end of group CMSIS_SIMD_intrinsics */ + + +#pragma GCC diagnostic pop + +#endif /* __CMSIS_GCC_H */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_iccarm.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_iccarm.h new file mode 100755 index 00000000000..7eeffca5c71 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_iccarm.h @@ -0,0 +1,968 @@ +/**************************************************************************//** + * @file cmsis_iccarm.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.2.0 + * @date 28. January 2020 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2017-2019 IAR Systems +// Copyright (c) 2017-2019 Arm Limited. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_H__ +#define __CMSIS_ICCARM_H__ + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#pragma system_include + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. + #define __ALIGNED(x) + #endif +#endif + + +/* Define compiler macros for CPU architecture, used in CMSIS 5. 
+ */ +#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ +/* Macros already defined */ +#else + #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M' + #if __ARM_ARCH == 6 + #define __ARM_ARCH_6M__ 1 + #elif __ARM_ARCH == 7 + #if __ARM_FEATURE_DSP + #define __ARM_ARCH_7EM__ 1 + #else + #define __ARM_ARCH_7M__ 1 + #endif + #endif /* __ARM_ARCH */ + #endif /* __ARM_ARCH_PROFILE == 'M' */ +#endif + +/* Alternativ core deduction for older ICCARM's */ +#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \ + !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__) + #if defined(__ARM6M__) && (__CORE__ == __ARM6M__) + #define __ARM_ARCH_6M__ 1 + #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__) + #define __ARM_ARCH_7M__ 1 + #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__) + #define __ARM_ARCH_7EM__ 1 + #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__) + #define __ARM_ARCH_8M_BASE__ 1 + #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__) + #define __ARM_ARCH_8M_MAIN__ 1 + #else + #error "Unknown target." + #endif +#endif + + + +#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1 + #define __IAR_M0_FAMILY 1 +#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1 + #define __IAR_M0_FAMILY 1 +#else + #define __IAR_M0_FAMILY 0 +#endif + + +#ifndef __ASM + #define __ASM __asm +#endif + +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif + +#ifndef __INLINE + #define __INLINE inline +#endif + +#ifndef __NO_RETURN + #if __ICCARM_V8 + #define __NO_RETURN __attribute__((__noreturn__)) + #else + #define __NO_RETURN _Pragma("object_attribute=__noreturn") + #endif +#endif + +#ifndef __PACKED + #if __ICCARM_V8 + #define __PACKED __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED __packed + #endif +#endif + +#ifndef __PACKED_STRUCT + #if __ICCARM_V8 + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_STRUCT __packed struct + #endif +#endif + +#ifndef __PACKED_UNION + #if __ICCARM_V8 + #define __PACKED_UNION union __attribute__((packed, aligned(1))) + #else + /* Needs IAR language extensions */ + #define __PACKED_UNION __packed union + #endif +#endif + +#ifndef __RESTRICT + #if __ICCARM_V8 + #define __RESTRICT __restrict + #else + /* Needs IAR language extensions */ + #define __RESTRICT restrict + #endif +#endif + +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif + +#ifndef __FORCEINLINE + #define __FORCEINLINE _Pragma("inline=forced") +#endif + +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE +#endif + +#ifndef __UNALIGNED_UINT16_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint16_t __iar_uint16_read(void const *ptr) +{ + return *(__packed uint16_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) +{ + *(__packed 
uint16_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ +#pragma language=save +#pragma language=extended +__IAR_FT uint32_t __iar_uint32_read(void const *ptr) +{ + return *(__packed uint32_t*)(ptr); +} +#pragma language=restore +#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE +#pragma language=save +#pragma language=extended +__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) +{ + *(__packed uint32_t*)(ptr) = val;; +} +#pragma language=restore +#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32 /* deprecated */ +#pragma language=save +#pragma language=extended +__packed struct __iar_u32 { uint32_t v; }; +#pragma language=restore +#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + +#ifndef __PROGRAM_START +#define __PROGRAM_START __iar_program_start +#endif + +#ifndef __INITIAL_SP +#define __INITIAL_SP CSTACK$$Limit +#endif + +#ifndef __STACK_LIMIT +#define __STACK_LIMIT CSTACK$$Base +#endif + +#ifndef __VECTOR_TABLE +#define __VECTOR_TABLE __vector_table +#endif + +#ifndef __VECTOR_TABLE_ATTRIBUTE +#define __VECTOR_TABLE_ATTRIBUTE @".intvec" +#endif + +#ifndef __ICCARM_INTRINSICS_VERSION__ + #define __ICCARM_INTRINSICS_VERSION__ 0 +#endif + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + + #if defined(__CLZ) + #undef __CLZ + #endif + #if defined(__REVSH) + #undef __REVSH + #endif + #if defined(__RBIT) + #undef __RBIT + #endif + #if defined(__SSAT) + #undef __SSAT + #endif + #if defined(__USAT) + #undef __USAT + #endif + + #include "iccarm_builtin.h" + + #define __disable_fault_irq __iar_builtin_disable_fiq + #define __disable_irq __iar_builtin_disable_interrupt + #define __enable_fault_irq __iar_builtin_enable_fiq + #define __enable_irq __iar_builtin_enable_interrupt + #define __arm_rsr __iar_builtin_rsr + #define __arm_wsr __iar_builtin_wsr + + + #define __get_APSR() (__arm_rsr("APSR")) + #define __get_BASEPRI() (__arm_rsr("BASEPRI")) + #define __get_CONTROL() (__arm_rsr("CONTROL")) + #define __get_FAULTMASK() (__arm_rsr("FAULTMASK")) + + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + #define __get_FPSCR() (__arm_rsr("FPSCR")) + #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) + #else + #define __get_FPSCR() ( 0 ) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #define __get_IPSR() (__arm_rsr("IPSR")) + #define __get_MSP() (__arm_rsr("MSP")) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + #define __get_MSPLIM() (0U) + #else + #define __get_MSPLIM() (__arm_rsr("MSPLIM")) + #endif + #define __get_PRIMASK() (__arm_rsr("PRIMASK")) + #define __get_PSP() (__arm_rsr("PSP")) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __get_PSPLIM() (0U) + #else + #define __get_PSPLIM() 
(__arm_rsr("PSPLIM")) + #endif + + #define __get_xPSR() (__arm_rsr("xPSR")) + + #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) + #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) + #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE))) + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) + #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + #define __set_MSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_MSPLIM(VALUE) (__arm_wsr("MSPLIM", (VALUE))) + #endif + #define __set_PRIMASK(VALUE) (__arm_wsr("PRIMASK", (VALUE))) + #define __set_PSP(VALUE) (__arm_wsr("PSP", (VALUE))) + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __set_PSPLIM(VALUE) ((void)(VALUE)) + #else + #define __set_PSPLIM(VALUE) (__arm_wsr("PSPLIM", (VALUE))) + #endif + + #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) + #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE))) + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) + #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) + #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) + #define __TZ_set_MSP_NS(VALUE) (__arm_wsr("MSP_NS", (VALUE))) + #define __TZ_get_SP_NS() (__arm_rsr("SP_NS")) + #define __TZ_set_SP_NS(VALUE) (__arm_wsr("SP_NS", (VALUE))) + #define __TZ_get_PRIMASK_NS() (__arm_rsr("PRIMASK_NS")) + #define __TZ_set_PRIMASK_NS(VALUE) (__arm_wsr("PRIMASK_NS", (VALUE))) + #define __TZ_get_BASEPRI_NS() (__arm_rsr("BASEPRI_NS")) + #define __TZ_set_BASEPRI_NS(VALUE) (__arm_wsr("BASEPRI_NS", (VALUE))) + #define __TZ_get_FAULTMASK_NS() (__arm_rsr("FAULTMASK_NS")) + #define __TZ_set_FAULTMASK_NS(VALUE)(__arm_wsr("FAULTMASK_NS", (VALUE))) + + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + #define __TZ_get_PSPLIM_NS() (0U) + #define __TZ_set_PSPLIM_NS(VALUE) ((void)(VALUE)) + #else + #define __TZ_get_PSPLIM_NS() (__arm_rsr("PSPLIM_NS")) + #define __TZ_set_PSPLIM_NS(VALUE) (__arm_wsr("PSPLIM_NS", (VALUE))) + #endif + + #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS")) + #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE))) + + #define __NOP __iar_builtin_no_operation + + #define __CLZ __iar_builtin_CLZ + #define __CLREX __iar_builtin_CLREX + + #define __DMB __iar_builtin_DMB + #define __DSB __iar_builtin_DSB + #define __ISB __iar_builtin_ISB + + #define __LDREXB __iar_builtin_LDREXB + #define __LDREXH __iar_builtin_LDREXH + #define __LDREXW __iar_builtin_LDREX + + #define __RBIT __iar_builtin_RBIT + #define __REV __iar_builtin_REV + #define __REV16 __iar_builtin_REV16 + + __IAR_FT int16_t __REVSH(int16_t val) + { + return (int16_t) __iar_builtin_REVSH(val); + } + + #define __ROR __iar_builtin_ROR + #define __RRX __iar_builtin_RRX + + #define __SEV __iar_builtin_SEV + + #if !__IAR_M0_FAMILY + #define __SSAT __iar_builtin_SSAT + #endif + + #define __STREXB __iar_builtin_STREXB + #define __STREXH __iar_builtin_STREXH + #define __STREXW __iar_builtin_STREX + + #if !__IAR_M0_FAMILY + #define __USAT __iar_builtin_USAT + #endif + + #define __WFE __iar_builtin_WFE 
+ #define __WFI __iar_builtin_WFI + + #if __ARM_MEDIA__ + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 __iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 __iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB + #endif + +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */ + #define __CLZ __cmsis_iar_clz_not_active + #define __SSAT __cmsis_iar_ssat_not_active + #define __USAT __cmsis_iar_usat_not_active + #define __RBIT __cmsis_iar_rbit_not_active + #define __get_APSR __cmsis_iar_get_APSR_not_active + #endif + + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #define __get_FPSCR __cmsis_iar_get_FPSR_not_active + #define __set_FPSCR __cmsis_iar_set_FPSR_not_active + #endif + + #ifdef __INTRINSICS_INCLUDED + #error intrinsics.h is already included previously! + #endif + + #include + + #if __IAR_M0_FAMILY + /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. 
*/ + #undef __CLZ + #undef __SSAT + #undef __USAT + #undef __RBIT + #undef __get_APSR + + __STATIC_INLINE uint8_t __CLZ(uint32_t data) + { + if (data == 0U) { return 32U; } + + uint32_t count = 0U; + uint32_t mask = 0x80000000U; + + while ((data & mask) == 0U) + { + count += 1U; + mask = mask >> 1U; + } + return count; + } + + __STATIC_INLINE uint32_t __RBIT(uint32_t v) + { + uint8_t sc = 31U; + uint32_t r = v; + for (v >>= 1U; v; v >>= 1U) + { + r <<= 1U; + r |= v & 1U; + sc--; + } + return (r << sc); + } + + __STATIC_INLINE uint32_t __get_APSR(void) + { + uint32_t res; + __asm("MRS %0,APSR" : "=r" (res)); + return res; + } + + #endif + + #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) + #undef __get_FPSCR + #undef __set_FPSCR + #define __get_FPSCR() (0) + #define __set_FPSCR(VALUE) ((void)VALUE) + #endif + + #pragma diag_suppress=Pe940 + #pragma diag_suppress=Pe177 + + #define __enable_irq __enable_interrupt + #define __disable_irq __disable_interrupt + #define __NOP __no_operation + + #define __get_xPSR __get_PSR + + #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) + + __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) + { + return __LDREX((unsigned long *)ptr); + } + + __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) + { + return __STREX(value, (unsigned long *)ptr); + } + #endif + + + /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ + #if (__CORTEX_M >= 0x03) + + __IAR_FT uint32_t __RRX(uint32_t value) + { + uint32_t result; + __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value)); + return(result); + } + + __IAR_FT void __set_BASEPRI_MAX(uint32_t value) + { + __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value)); + } + + + #define __enable_fault_irq __enable_fiq + #define __disable_fault_irq __disable_fiq + + + #endif /* (__CORTEX_M >= 0x03) */ + + __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) + { + return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2)); + } + + #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + __IAR_FT uint32_t __get_MSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,MSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_MSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR MSPLIM,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __get_PSPLIM(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __set_PSPLIM(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM,%0" :: "r" (value)); + 
#endif + } + + __IAR_FT uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,CONTROL_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) + { + __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PSP_NS(uint32_t value) + { + __asm volatile("MSR PSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_MSP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSP_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSP_NS(uint32_t value) + { + __asm volatile("MSR MSP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_SP_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,SP_NS" : "=r" (res)); + return res; + } + __IAR_FT void __TZ_set_SP_NS(uint32_t value) + { + __asm volatile("MSR SP_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,PRIMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value) + { + __asm volatile("MSR PRIMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,BASEPRI_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value) + { + __asm volatile("MSR BASEPRI_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,FAULTMASK_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value) + { + __asm volatile("MSR FAULTMASK_NS,%0" :: "r" (value)); + } + + __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void) + { + uint32_t res; + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + res = 0U; + #else + __asm volatile("MRS %0,PSPLIM_NS" : "=r" (res)); + #endif + return res; + } + + __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value) + { + #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)value; + #else + __asm volatile("MSR PSPLIM_NS,%0" :: "r" (value)); + #endif + } + + __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void) + { + uint32_t res; + __asm volatile("MRS %0,MSPLIM_NS" : "=r" (res)); + return res; + } + + __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value) + { + __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value)); + } + + #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + +#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) + +#if __IAR_M0_FAMILY + __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) + { + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; + } + + __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) + { + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; + } +#endif + +#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h 
and core_cm4.h. */ + + __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) + { + uint32_t res; + __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) + { + uint32_t res; + __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) + { + uint32_t res; + __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return res; + } + + __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) + { + __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) + { + __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) + { + __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); + } + +#endif /* (__CORTEX_M >= 0x03) */ + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + + + __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) + { + __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) + { + __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) + { + __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + +#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + +#undef __IAR_FT +#undef __IAR_M0_FAMILY +#undef __ICCARM_V8 + +#pragma diag_default=Pe940 +#pragma diag_default=Pe177 + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#endif /* __CMSIS_ICCARM_H__ */ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_version.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_version.h new file mode 100755 index 00000000000..2f048e4552d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/cmsis_version.h @@ -0,0 +1,39 @@ +/**************************************************************************//** + * @file cmsis_version.h + * @brief CMSIS Core(M) Version definitions + * @version V5.0.4 + * @date 23. July 2019 + ******************************************************************************/ +/* + * Copyright (c) 2009-2019 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CMSIS_VERSION_H +#define __CMSIS_VERSION_H + +/* CMSIS Version definitions */ +#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ +#define __CM_CMSIS_VERSION_SUB ( 4U) /*!< [15:0] CMSIS Core(M) sub version */ +#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ + __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv81mml.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv81mml.h new file mode 100755 index 00000000000..1ad19e215ac --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv81mml.h @@ -0,0 +1,4191 @@ +/**************************************************************************//** + * @file core_armv81mml.h + * @brief CMSIS Armv8.1-M Mainline Core Peripheral Access Layer Header File + * @version V1.3.1 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_ARMV81MML_H_GENERIC +#define __CORE_ARMV81MML_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMV81MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS ARMV81MML definitions */ +#define __ARMv81MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv81MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv81MML_CMSIS_VERSION ((__ARMv81MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv81MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (81U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined 
(__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV81MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV81MML_H_DEPENDANT +#define __CORE_ARMV81MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv81MML_REV + #define __ARMv81MML_REV 0x0000U + #warning "__ARMv81MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 2U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 31 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! 
Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv81MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
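+
+  Editor's note: the sketch below is illustrative only and is not part of the upstream CMSIS header.
+  Assuming the NVIC peripheral pointer declared further down in this header, a device IRQ number
+  IRQn, and a priority value prio, the enable/priority pattern used by the CMSIS NVIC helpers is:
+
+    NVIC->ISER[((uint32_t)IRQn) >> 5UL] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
+    NVIC->IPR[(uint32_t)IRQn] = (uint8_t)((prio << (8U - __NVIC_PRIO_BITS)) & 0xFFUL);
+
+  Each interrupt occupies one bit of ISER/ICER/ISPR/ICPR (32 interrupts per word) and one byte of
+  IPR, of which only the upper __NVIC_PRIO_BITS bits are implemented.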
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define 
SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND 
Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ 
+#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): 
MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ 
+#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << 
SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
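+
+  Editor's note: illustrative sketch only, not part of the upstream CMSIS header. A minimal
+  periodic-tick setup using the CTRL bit masks defined below (ticks is an assumed reload value,
+  at most 0xFFFFFF) follows the same steps as the CMSIS SysTick_Config() helper:
+
+    SysTick->LOAD = ticks - 1UL;
+    SysTick->VAL  = 0UL;
+    SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
+                    SysTick_CTRL_TICKINT_Msk   |
+                    SysTick_CTRL_ENABLE_Msk;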
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
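+
+  Editor's note: illustrative sketch only, not part of the upstream CMSIS header. Stimulus port 0
+  is typically used for character output; the usual pattern (mirroring the CMSIS ITM_SendChar()
+  helper, with ch an assumed character variable) checks that the ITM and port 0 are enabled,
+  waits for FIFO space, then writes one byte:
+
+    if ((ITM->TCR & ITM_TCR_ITMENA_Msk) && (ITM->TER & 1UL)) {
+      while (ITM->PORT[0U].u32 == 0UL) { __NOP(); }
+      ITM->PORT[0U].u8 = (uint8_t)ch;
+    }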
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt 
Position */ +#define TPI_FFCR_EnFmt_Msk (0x3UL /*<< TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: PSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU).
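+
+  Editor's note: illustrative sketch only, not part of the upstream CMSIS header. Assuming the
+  PMU peripheral pointer provided by this header, a valid event_number, and a uint32_t count
+  variable, counting one event on counter 0 follows the pattern of the pmu_armv8.h helpers:
+  select the event, enable the counter, run the code being measured, then read the counter back
+  (the PMU itself must also be enabled through its CTRL register):
+
+    PMU->EVTYPER[0U] = event_number;
+    PMU->CNTENSET    = PMU_CNTENSET_CNT0_ENABLE_Msk;
+    count = PMU->EVCNTR[0U];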
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[4]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 3 */ + uint32_t RESERVED14[3]; + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (16UL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (16UL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set 
Mask */ + +#define PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U 
/*!< PMU CNTENSET: Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U 
/*!< PMU CNTENSET: Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ + +#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear
Mask */ + +#define PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << 
PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << 
PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ 
+#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL << PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ + +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define
PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) 
/*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: 
Event Counter 1 Overflow Set Position */ +#define PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event 
Counter 16 Overflow Set Mask */ + +#define PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register 
Definitions */ + +#define PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 
15 Overflow Clear Position */ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ 
+#define PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) 
/*!< PMU SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU 
Control Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU).
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN 
Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
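A sketch of how the MPU_Type registers and the RBAR/RLAR/MAIR bit masks above combine. The helper name and the attribute and access-permission values are purely illustrative; CMSIS also ships higher-level ARM_MPU_* wrappers for this in mpu_armv8.h (included in this patch).

#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
/* Illustrative helper: map one region as normal, write-back cacheable memory,
   readable and writable at any privilege level, then enable the MPU while
   keeping the default memory map for privileged accesses. */
static inline void mpu_region0_sketch(MPU_Type *mpu, uint32_t base, uint32_t limit)
{
  mpu->RNR   = 0U;                                             /* select region 0                       */
  mpu->MAIR0 = (0xFFUL << MPU_MAIR0_Attr0_Pos);                /* Attr0: normal, write-back (example)   */
  mpu->RBAR  = (base  & MPU_RBAR_BASE_Msk) |                   /* region base, bits [31:5]              */
               (0x1UL << MPU_RBAR_AP_Pos);                     /* AP = 0b01: read/write, any privilege  */
  mpu->RLAR  = (limit & MPU_RLAR_LIMIT_Msk) |                  /* region limit, bits [31:5]             */
               (0x0UL << MPU_RLAR_AttrIndx_Pos) |              /* use MAIR Attr0                        */
               MPU_RLAR_EN_Msk;                                /* enable the region                     */
  mpu->CTRL  = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  /* enable the MPU                        */
  __DSB();
  __ISB();
}
#endif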
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
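A small usage sketch for the DCB definitions above, assuming the DWT_Type block and its DWT_CTRL_CYCCNTENA bit defined earlier in this header; setting TRCENA in DEMCR is what makes the DWT cycle counter accessible. The helper name is illustrative only.

/* Illustrative helper: enable the trace infrastructure, then clear and start the DWT cycle counter. */
static inline void dwt_cyccnt_start(DCB_Type *dcb, DWT_Type *dwt)
{
  dcb->DEMCR |= DCB_DEMCR_TRCENA_Msk;     /* TRCENA gates access to DWT and ITM */
  dwt->CYCCNT = 0U;                       /* clear the cycle counter            */
  dwt->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;   /* start counting processor cycles    */
}

With the instance pointers from the base-address section further below, this would be called as dwt_cyccnt_start(DCB, DWT).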
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1UL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit field value. + \param[in] field Name of the register bit field. + \param[in] value Value of the register. This parameter is interpreted as a uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures.
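A short example of the _VAL2FLD / _FLD2VAL helpers defined above, applied to an AIRCR register image using the SCB_AIRCR field definitions from earlier in this header (the same fields that __NVIC_SetPriorityGrouping further below manipulates directly). The helper name is illustrative only.

/* Illustrative helper: insert a PRIGROUP value into an AIRCR image, then extract it again. */
static inline uint32_t prigroup_field_example(uint32_t aircr, uint32_t prigroup)
{
  aircr &= ~(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk);        /* clear the fields to change   */
  aircr |= _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL) |                    /* write key required by AIRCR  */
           _VAL2FLD(SCB_AIRCR_PRIGROUP, prigroup & 0x7UL);           /* only values 0..7 are used    */
  return _FLD2VAL(SCB_AIRCR_PRIGROUP, aircr);                        /* read the field value back    */
}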
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define 
CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point 
context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
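The EXC_RETURN mask values defined above (just before the NVIC functions) are meant to be tested against the link register value seen on exception entry; a minimal sketch, with an illustrative helper name:

/* Illustrative helper: return 1 if the EXC_RETURN value indicates the context was
   stacked on the process stack (PSP), 0 if it was stacked on the main stack (MSP). */
static inline uint32_t exc_return_used_psp(uint32_t exc_return)
{
  return ((exc_return & EXC_RETURN_SPSEL) != 0U) ? 1U : 0U;
}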
+ */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. 
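Tying together the basic NVIC functions implemented above, a minimal sketch that enables a device interrupt, pends it from software and reads back its pending state. The IRQn_Type value comes from the device header; the helper name is illustrative only.

/* Illustrative helper: enable an interrupt, trigger it from software, report its pending state. */
static inline uint32_t nvic_kick_irq(IRQn_Type irq)
{
  NVIC_EnableIRQ(irq);              /* set the enable bit in NVIC->ISER        */
  NVIC_SetPendingIRQ(irq);          /* pend the interrupt from software        */
  return NVIC_GetPendingIRQ(irq);   /* usually 0 again once the IRQ was taken  */
}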
+ */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
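+  \par     Example
+           A minimal usage sketch: request a reset once an unrecoverable error has been handled.
+           The call does not return; the core spins until the reset takes effect.
+  \code
+  NVIC_SystemReset();   // no statement after this point is reached
+  \endcode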
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. 
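+  \par     Example
+           A minimal usage sketch for a 1 ms tick, assuming the device startup code provides the
+           conventional CMSIS variable SystemCoreClock (core clock frequency in Hz).
+  \code
+  if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
+  {
+    // Requested reload value does not fit the 24-bit SysTick counter: handle the error here.
+  }
+  \endcode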
+ \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. 
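+  \par     Example
+           A minimal usage sketch: poll for a character written by the debugger into ITM_RxBuffer
+           and echo it back through ITM stimulus port 0.
+  \code
+  if (ITM_CheckChar() != 0)
+  {
+    int32_t c = ITM_ReceiveChar();      // returns -1 if no character is pending
+    if (c != -1)
+    {
+      (void)ITM_SendChar((uint32_t)c);  // echo the character via ITM channel 0
+    }
+  }
+  \endcode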
+ \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV81MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mbl.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mbl.h new file mode 100755 index 00000000000..932d3d188bf --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mbl.h @@ -0,0 +1,2222 @@ +/**************************************************************************//** + * @file core_armv8mbl.h + * @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File + * @version V5.1.0 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_ARMV8MBL_H_GENERIC +#define __CORE_ARMV8MBL_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+     Function definitions in header files are used to allow 'inlining'.
+
+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+     Unions are used for effective representation of core registers.
+
+   \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MBL + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __ARMv8MBL_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MBL_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MBL_CMSIS_VERSION ((__ARMv8MBL_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MBL_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (2U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MBL_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MBL_H_DEPENDANT +#define __CORE_ARMV8MBL_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MBL_REV + #define __ARMv8MBL_REV 0x0000U + #warning "__ARMv8MBL_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MBL */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
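+  \par     Example
+           A minimal sketch that decodes two CPUID fields using the bit definitions below.
+  \code
+  uint32_t implementer = (SCB->CPUID & SCB_CPUID_IMPLEMENTER_Msk) >> SCB_CPUID_IMPLEMENTER_Pos;
+  uint32_t partno      = (SCB->CPUID & SCB_CPUID_PARTNO_Msk)      >> SCB_CPUID_PARTNO_Pos;
+  \endcode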
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
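+  \par     Example
+           A minimal register-level sketch: busy-wait for one full count-down of the 24-bit timer,
+           clocked from the processor clock and without using the SysTick interrupt. The CMSIS
+           SysTick_Config() helper wraps a similar sequence and additionally enables the interrupt.
+  \code
+  SysTick->LOAD = 0x00FFFFFFUL;                  // maximum 24-bit reload value
+  SysTick->VAL  = 0UL;                           // clear current value and COUNTFLAG
+  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |   // use the processor clock
+                  SysTick_CTRL_ENABLE_Msk;       // start the counter, interrupt disabled
+  while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0UL) { }  // wait for wrap to 0
+  SysTick->CTRL = 0UL;                           // stop the counter again
+  \endcode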
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
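+  \par     Example
+           A minimal sketch that reads how many watchpoint comparators are implemented before
+           programming any of the COMPn/FUNCTIONn register pairs.
+  \code
+  uint32_t num_comparators = (DWT->CTRL & DWT_CTRL_NUMCOMP_Msk) >> DWT_CTRL_NUMCOMP_Pos;
+  \endcode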
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
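+  \par     Example
+           A minimal sketch that selects the NRZ (UART-like) SWO protocol and programs the SWO
+           prescaler; trace_clock_hz and swo_baud_hz are hypothetical values that depend on the
+           device clock tree and the debug probe.
+  \code
+  TPI->SPPR = 2UL;                                    // TXMODE = 2: NRZ (SWO) protocol
+  TPI->ACPR = (trace_clock_hz / swo_baud_hz) - 1UL;   // SWO output = trace clock / (SWOSCALER + 1)
+  \endcode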
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. 
Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
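+ \par Example
+ A minimal sketch of programming one region directly through these registers
+ (illustrative only: the address range and the AP value are placeholders, and
+ application code would normally use the ARM_MPU_* helpers from mpu_armv8.h
+ and add the required memory barriers):
+ \code
+ MPU->RNR  = 0U;                                             // select region 0
+ MPU->RBAR = (0x20000000UL & MPU_RBAR_BASE_Msk)              // region base address, bits [31:5]
+           | (1UL << MPU_RBAR_AP_Pos);                       // access permission field (placeholder value)
+ MPU->RLAR = (0x2001FFE0UL & MPU_RLAR_LIMIT_Msk)             // region limit address, bits [31:5]
+           | MPU_RLAR_EN_Msk;                                // enable the region
+ MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  // enable the MPU, default map for privileged code
+ \endcode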
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define 
MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined 
(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT 
Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< \deprecated CoreDebug DEMCR: DWTENA Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers 
(DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define 
DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
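+ \par Example
+ A short sketch of extracting one field with the DIB bit definitions below
+ (illustrative only):
+ \code
+ // read the Secure non-invasive debug allowance from DAUTHSTATUS
+ uint32_t snid = (DIB->DAUTHSTATUS & DIB_DAUTHSTATUS_SNID_Msk) >> DIB_DAUTHSTATUS_SNID_Pos;
+ \endcode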
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS 
(SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
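+ \par Example
+ A typical enable sequence (illustrative only: USARTx_IRQn stands for a
+ device-specific interrupt number from the device header, and the priority
+ value is a placeholder):
+ \code
+ NVIC_SetPriority(USARTx_IRQn, 1U);  // configure the priority first
+ NVIC_EnableIRQ(USARTx_IRQn);        // then enable the interrupt
+ \endcode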
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
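+ \par Example
+ A short sketch of reading back a previously set priority (illustrative only:
+ USARTx_IRQn is a device-specific interrupt number assumed here):
+ \code
+ NVIC_SetPriority(USARTx_IRQn, 2U);
+ uint32_t prio = NVIC_GetPriority(USARTx_IRQn);  // yields 2 when at least 2 priority bits are implemented
+ \endcode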
+ */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
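+ \par Example
+ A minimal sketch of installing a handler at run time (illustrative only:
+ TIMx_IRQn and TIMx_Handler are hypothetical device-specific names, and the
+ vector table is assumed to be located in SRAM as described above):
+ \code
+ extern void TIMx_Handler(void);                     // handler implemented elsewhere
+ NVIC_SetVector(TIMx_IRQn, (uint32_t)TIMx_Handler);  // patch the SRAM vector table
+ NVIC_EnableIRQ(TIMx_IRQn);
+ \endcode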
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
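+ \par Example
+ A short sketch for secure code polling a non-secure interrupt (illustrative
+ only: USARTx_IRQn is a device-specific interrupt number assumed here):
+ \code
+ if (TZ_NVIC_GetPendingIRQ_NS(USARTx_IRQn) != 0U)
+ {
+   TZ_NVIC_ClearPendingIRQ_NS(USARTx_IRQn);  // discard the pending request
+ }
+ \endcode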
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
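+ \par Example
+ A read-modify-write sketch combining this function with TZ_DCB_SetAuthCtrl_NS
+ and the DCB_DAUTHCTRL bit definitions above (illustrative only: which bits
+ actually take effect depends on the device's debug authentication scheme):
+ \code
+ uint32_t ctrl = TZ_DCB_GetAuthCtrl_NS();
+ TZ_DCB_SetAuthCtrl_NS(ctrl | DCB_DAUTHCTRL_INTSPIDEN_Msk);  // request internal Secure invasive debug enable
+ \endcode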
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
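+ \par Example
+ A common pattern for a 1 ms tick (illustrative only: SystemCoreClock is the
+ device-provided core clock variable and is assumed here to also drive the
+ non-secure SysTick):
+ \code
+ if (TZ_SysTick_Config_NS(SystemCoreClock / 1000U) != 0U)
+ {
+   // reload value out of range - handle the configuration error
+ }
+ \endcode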
+
+ */
+__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks)
+{
+ if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+ {
+ return (1UL); /* Reload value impossible */
+ }
+
+ SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
+ TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+ SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */
+ SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
+ SysTick_CTRL_TICKINT_Msk |
+ SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
+ return (0UL); /* Function successful */
+}
+#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */
+
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_ARMV8MBL_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mml.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mml.h
new file mode 100755
index 00000000000..71f000bcadf
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_armv8mml.h
@@ -0,0 +1,3196 @@
+/**************************************************************************//**
+ * @file core_armv8mml.h
+ * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File
+ * @version V5.2.0
+ * @date 27. March 2020
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+ #pragma clang system_header /* treat file as system include file */
+#elif defined ( __GNUC__ )
+ #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */
+#endif
+
+#ifndef __CORE_ARMV8MML_H_GENERIC
+#define __CORE_ARMV8MML_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
+ CMSIS violates the following MISRA-C:2004 rules:
+
+ \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_ARMv8MML + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS Armv8MML definitions */ +#define __ARMv8MML_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __ARMv8MML_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __ARMv8MML_CMSIS_VERSION ((__ARMv8MML_CMSIS_VERSION_MAIN << 16U) | \ + __ARMv8MML_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (80U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + 
#if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_ARMV8MML_H_DEPENDANT +#define __CORE_ARMV8MML_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __ARMv8MML_REV + #define __ARMv8MML_REV 0x0000U + #warning "__ARMv8MML_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group ARMv8MML */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security 
Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define 
SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define 
SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << 
SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define 
SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk 
(0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) 
/*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + 
uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< 
DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ 
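/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
 * A common use of the DWT control bits defined above is cycle counting. The DWT and
 * CoreDebug peripheral pointers and the DEMCR bit mask referenced here are defined
 * further down in this header; the sequence is only sketched, not compiled:
 *
 *   CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   enable the DWT/ITM trace logic
 *   DWT->CYCCNT       = 0U;                           reset the cycle counter
 *   DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;       start counting core clock cycles
 *   ...  code under measurement  ...
 *   uint32_t cycles   = DWT->CYCCNT;                  elapsed cycles since the reset
 */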
+#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << 
TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
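         Editor's illustrative sketch (not part of the original header): an ARMv8-M
         region is programmed by selecting it in RNR and then writing the paired
         RBAR/RLAR values, for example
           MPU->RNR  = 0U;
           MPU->RBAR = (region_base  & MPU_RBAR_BASE_Msk);
           MPU->RLAR = (region_limit & MPU_RLAR_LIMIT_Msk) | MPU_RLAR_EN_Msk;
         region_base and region_limit are placeholder application addresses; the
         CMSIS mpu_armv8.h helpers (e.g. ARM_MPU_SetRegion) wrap this sequence.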
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) 
/*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
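         Editor's illustrative sketch (not part of the original header): where SAU
         regions are implemented, Secure code typically opens a Non-secure window as
           SAU->RNR  = 0U;
           SAU->RBAR = ns_start & SAU_RBAR_BADDR_Msk;
           SAU->RLAR = (ns_end & SAU_RLAR_LADDR_Msk) | SAU_RLAR_ENABLE_Msk;
           SAU->CTRL |= SAU_CTRL_ENABLE_Msk;
         ns_start and ns_end are placeholder addresses supplied by the application.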
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
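/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
 * MVFR0/MVFR1/MVFR2 are read-only feature registers, so software can probe the FPU
 * capabilities at run time through the FPU pointer defined further down in this header:
 *
 *   if ((FPU->MVFR0 & FPU_MVFR0_Double_precision_Msk) != 0U)
 *   {
 *       double-precision floating-point instructions are implemented
 *   }
 */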
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
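/* Editor's note: illustrative usage sketch, not part of the original CMSIS header.
 * A common use of the (deprecated) CoreDebug view is to check whether a halting
 * debugger has attached before emitting semihosting or ITM output:
 *
 *   if ((CoreDebug->DHCSR & CoreDebug_DHCSR_C_DEBUGEN_Msk) != 0U)
 *   {
 *       a debugger has enabled halting debug on this core
 *   }
 */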
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
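         Editor's note (not part of the original header): DCB_Type maps the same
         registers at the same base address as the deprecated CoreDebug_Type above,
         so enabling the trace logic can equivalently be written as
           DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;
         instead of the older CoreDebug->DEMCR form.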
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
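A minimal usage sketch covering both cases; TIM2_IRQn stands in for a real device interrupt number:

  NVIC_SetPriority(TIM2_IRQn, 3U);                                 /* positive IRQn: written to NVIC->IPR */
  NVIC_SetPriority(PendSV_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);  /* negative IRQn: written to SCB->SHPR */
  uint32_t prio = NVIC_GetPriority(TIM2_IRQn);                     /* reads back 3 */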
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
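For illustration, start-up code could cross-check the reported FPU type against the build configuration; the interpretation of the return value follows the documentation of SCB_GetFPUType() below:

  switch (SCB_GetFPUType())
  {
    case 2U: /* double + single precision FPU */            break;
    case 1U: /* single precision FPU only */                break;
    default: /* no FPU: hard-float code must be avoided */  break;
  }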
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. 
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
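As a sketch only: secure start-up code might arm the non-secure SysTick for a 1 ms tick before handing control to the non-secure image; the clock value here is an assumed non-secure core clock in Hz:

  if (TZ_SysTick_Config_NS(SystemCoreClock / 1000U) != 0U)
  {
    /* requested period does not fit the 24-bit reload register */
  }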
+ + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_ARMV8MML_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0.h new file mode 100755 index 00000000000..6441ff34190 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0.h @@ -0,0 +1,952 @@ +/**************************************************************************//** + * @file core_cm0.h + * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File + * @version V5.0.8 + * @date 21. 
August 2019 + ******************************************************************************/ +/* + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0_H_GENERIC +#define __CORE_CM0_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M0 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0 definitions */ +#define __CM0_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \ + __CM0_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0_H_DEPENDANT +#define __CORE_CM0_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0_REV + #define __CM0_REV 0x0000U + #warning "__CM0_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M0 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define 
SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
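A short usage sketch (ADC_IRQn is a placeholder from a device header); because the function ends with DSB/ISB, the handler is not entered again once the call returns, so the peripheral can be reconfigured without racing it:

  NVIC_DisableIRQ(ADC_IRQn);       /* mask the interrupt; barriers make it effective */
  /* ... reprogram the peripheral ... */
  NVIC_ClearPendingIRQ(ADC_IRQn);  /* discard anything that pended while masked */
  NVIC_EnableIRQ(ADC_IRQn);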
+ */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. 
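Worked example (an illustration assuming two implemented priority bits, typical for Cortex-M0 devices): with PriorityGroup = 6 there is one preempt-priority bit and one subpriority bit, so encoding preempt priority 1 with subpriority 1 gives (1 << 1) | 1 = 3:

  uint32_t prio = NVIC_EncodePriority(6U, 1U, 1U);  /* == 3 when __NVIC_PRIO_BITS == 2 */
  NVIC_SetPriority(TIM1_IRQn, prio);                /* TIM1_IRQn: placeholder device IRQ */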
+ \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ + /* ARM Application Note 321 states that the M0 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
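+  \note    Typical usage sketch, assuming the device startup code maintains the CMSIS
+           SystemCoreClock variable with the core clock frequency in Hz:
+           \code
+           if (SysTick_Config(SystemCoreClock / 1000U) != 0U)   // request a 1 ms tick
+           {
+             // reload value does not fit into the 24-bit SysTick counter
+           }
+           \endcode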
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0plus.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0plus.h new file mode 100755 index 00000000000..4e7179a6146 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm0plus.h @@ -0,0 +1,1087 @@ +/**************************************************************************//** + * @file core_cm0plus.h + * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File + * @version V5.0.9 + * @date 21. August 2019 + ******************************************************************************/ +/* + * Copyright (c) 2009-2019 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM0PLUS_H_GENERIC +#define __CORE_CM0PLUS_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex-M0+ + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM0+ definitions */ +#define __CM0PLUS_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM0PLUS_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM0PLUS_CMSIS_VERSION ((__CM0PLUS_CMSIS_VERSION_MAIN << 16U) | \ + __CM0PLUS_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (0U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM0PLUS_H_DEPENDANT +#define __CORE_CM0PLUS_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM0PLUS_REV + #define __CM0PLUS_REV 0x0000U + #warning "__CM0PLUS_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex-M0+ */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Interrupt Control State Register Definitions */ +#define 
SCB_VTOR_TBLOFF_Pos 8U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: 
Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M0+ header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
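+           When CMSIS_NVIC_VIRTUAL is defined (for example by an RTOS layer), the NVIC_*
+           names below are taken from the header given by CMSIS_NVIC_VIRTUAL_HEADER_FILE
+           instead of being mapped onto the __NVIC_* implementations in this file.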
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0+ */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. 
+ \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + If VTOR is not present address 0 must be mapped to SRAM. 
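+           Illustrative sketch for a device with __VTOR_PRESENT == 1; the table size,
+           alignment, TIM3_IRQn and TIM3_IRQHandler are assumptions, not part of CMSIS:
+           \code
+           extern void TIM3_IRQHandler(void);               // assumed application handler
+           static uint32_t ram_vectors[48] __attribute__((aligned(256)));   // GCC syntax
+           for (uint32_t i = 0U; i < 48U; i++)
+           {
+             ram_vectors[i] = ((uint32_t *)SCB->VTOR)[i];   // copy the current table
+           }
+           SCB->VTOR = (uint32_t)ram_vectors;               // relocate the table to SRAM
+           NVIC_SetVector(TIM3_IRQn, (uint32_t)TIM3_IRQHandler);
+           \endcode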
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; +#else + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + *(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */ +#endif + /* ARM Application Note 321 states that the M0+ does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +#else + uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */ + return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */ +#endif +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM0PLUS_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm1.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm1.h new file mode 100755 index 00000000000..76b4569743a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm1.h @@ -0,0 +1,979 @@ +/**************************************************************************//** + * @file core_cm1.h + * @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File + * @version V1.0.1 + * @date 12. November 2018 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM1_H_GENERIC +#define __CORE_CM1_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M1 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM1 definitions */ +#define __CM1_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM1_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM1_CMSIS_VERSION ((__CM1_CMSIS_VERSION_MAIN << 16U) | \ + __CM1_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (1U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM1_H_DEPENDANT +#define __CORE_CM1_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM1_REV + #define __CM1_REV 0x0100U + #warning "__CM1_REV not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + uint32_t RESERVED0; + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. 
[0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB 
AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */ + +#define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */ +#define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
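+
+           As an illustrative sketch (using the SysTick instance macro and the
+           field masks defined further down in this header), the free-running
+           counter could be sampled as:
+           \code
+           uint32_t now = SysTick->VAL & SysTick_VAL_CURRENT_Msk;  /* current 24-bit count */
+           \endcode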
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the Cortex-M1 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. 
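+
+           As an illustrative sketch (using the SysTick reload field defined in
+           this header), a bit field could be unpacked and re-packed as:
+           \code
+           uint32_t reload = _FLD2VAL(SysTick_LOAD_RELOAD, SysTick->LOAD);
+           SysTick->LOAD   = _VAL2FLD(SysTick_LOAD_RELOAD, reload);
+           \endcode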
+*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define 
_IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
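+
+           As an illustrative sketch (mirroring the call made by SysTick_Config()
+           further down; SysTick_IRQn comes from the device header), the SysTick
+           exception could be given the lowest implemented priority with:
+           \code
+           NVIC_SetPriority(SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL);
+           \endcode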
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
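+
+           As an illustrative sketch, an encode/decode round trip with an
+           assumed priority group of 0 behaves as follows:
+           \code
+           uint32_t prio = NVIC_EncodePriority(0U, 1U, 0U);
+           uint32_t preempt;
+           uint32_t sub;
+           NVIC_DecodePriority(prio, 0U, &preempt, &sub);  /* preempt == 1, sub == 0 */
+           \endcode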
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + Address 0 must be mapped to SRAM. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)0x0U; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M1 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)0x0U; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. 
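+
+           As an illustrative sketch (SystemCoreClock is assumed to be provided
+           by the device's system file), a 1 ms tick could be requested as:
+           \code
+           (void)SysTick_Config(SystemCoreClock / 1000U);  /* configure and start a 1 ms tick */
+           \endcode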
+ \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM1_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm23.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm23.h new file mode 100755 index 00000000000..55fff995096 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm23.h @@ -0,0 +1,2297 @@ +/**************************************************************************//** + * @file core_cm23.h + * @brief CMSIS Cortex-M23 Core Peripheral Access Layer Header File + * @version V5.1.0 + * @date 11. February 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM23_H_GENERIC +#define __CORE_CM23_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M23 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS definitions */ +#define __CM23_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM23_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM23_CMSIS_VERSION ((__CM23_CMSIS_VERSION_MAIN << 16U) | \ + __CM23_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (23U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM23_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM23_H_DEPENDANT +#define __CORE_CM23_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM23_REV + #define __CM23_REV 0x0000U + #warning "__CM23_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" 
+ #endif + + #ifndef __ETM_PRESENT + #define __ETM_PRESENT 0U + #warning "__ETM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MTB_PRESENT + #define __MTB_PRESENT 0U + #warning "__MTB_PRESENT not defined in device header file; using default!" + #endif + +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M23 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint32_t IPR[124U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
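+
+           As an illustrative sketch (assuming the SCB instance macro defined
+           later in this header), the CPUID part number could be extracted as:
+           \code
+           uint32_t partno = (SCB->CPUID & SCB_CPUID_PARTNO_Msk) >> SCB_CPUID_PARTNO_Pos;
+           \endcode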
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ +#else + uint32_t RESERVED0; +#endif + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED1; + __IOM uint32_t SHPR[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING 
Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ 
+#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
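+
+           As an illustrative sketch (assuming the SysTick instance macro defined
+           later in this header), the wrap flag could be polled as:
+           \code
+           if ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) != 0U)
+           {
+             /* the counter has counted down to 0 since CTRL was last read */
+           }
+           \endcode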
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
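+
+           As an illustrative sketch (assuming the DWT instance macro defined
+           later in this header), the number of implemented comparators could
+           be read as:
+           \code
+           uint32_t numcomp = (DWT->CTRL & DWT_CTRL_NUMCOMP_Msk) >> DWT_CTRL_NUMCOMP_Pos;
+           \endcode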
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + uint32_t RESERVED0[6U]; + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT 
CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
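+
+           As an illustrative sketch (assuming the TPI instance macro defined
+           later in this header), the number of trace inputs could be read as:
+           \code
+           uint32_t inputs = TPI->DEVID & TPI_DEVID_NrTraceInput_Msk;
+           \endcode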
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define 
TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */
+
+/* TPI Integration Test FIFO Test Data 0 Register Definitions */
+#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Position */
+#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */
+
+#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */
+#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */
+#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */
+#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte count Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */
+#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */
+#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */
+#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */
+
+/* TPI Integration Test ATB Control Register 2 Register Definitions */
+#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */
+#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2S Mask */
+
+#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */
+#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1S Mask */
+
+#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */
+#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */
+
+#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */
+#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */
+
+/* TPI Integration Test FIFO Test Data 1 Register Definitions */
+#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */
+#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */
+#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */
+
+#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */
+#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */
+
+#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */
+#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte count Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */
+#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */
+#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */
+#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */
+
+/* TPI Integration Test ATB Control Register 0 Definitions */
+#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */
+#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2S Mask */
+
+#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */
+#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1S Mask */
+
+#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */
+#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */
+
+#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */
+#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */
+
+/* TPI Integration Mode Control Register Definitions */
+#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */
+#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */
+
+/* TPI DEVID Register Definitions */
+#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */
+#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */
+
+#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */
+#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */
+
+#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */
+#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */
+
+#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */
+#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */
+
+#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */
+#define TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */
+
+/* TPI DEVTYPE Register Definitions */
+#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */
+#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */
+
+#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */
+#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */
+
+/*@}*/ /* end of group CMSIS_TPI */
+
+
+#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_MPU Memory Protection Unit (MPU)
+ \brief Type definitions for the Memory Protection Unit (MPU)
+ @{
+ */
+
+/**
+ \brief Structure type to
access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + uint32_t RESERVED0[7U]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 1U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: EN Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: EN Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< 
MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#endif +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/*@} end of group 
CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< 
\deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register */ +#define CoreDebug_DEMCR_DWTENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: DWTENA Position */ +#define CoreDebug_DEMCR_DWTENA_Msk (1UL << CoreDebug_DEMCR_DWTENA_Pos) /*!< \deprecated CoreDebug DEMCR: DWTENA Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug 
Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ 
+#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
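+
+ For example, the debug authentication state can be read through this block and decoded
+ with the bit-field macros and the DIB instance defined further below (sketch only):
+ \code
+ uint32_t secure_invasive = _FLD2VAL(DIB_DAUTHSTATUS_SID, DIB->DAUTHSTATUS);   // Secure invasive debug field
+ \endcode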
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */
+
+#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */
+#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */
+
+
+/*@} end of group CMSIS_DIB */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_core_bitfield Core register bit field macros
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+ @{
+ */
+
+/**
+ \brief Mask and shift a bit field value for use in a register bit range.
+ \param[in] field Name of the register bit field.
+ \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type.
+ \return Masked and shifted value.
+*/
+#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+ \brief Mask and shift a register value to extract a bit field value.
+ \param[in] field Name of the register bit field.
+ \param[in] value Value of register. This parameter is interpreted as a uint32_t type.
+ \return Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_core_base Core Definitions
+ \brief Definitions for base addresses, unions, and structures.
+ @{
+ */
+
+/* Memory mapping of Core Hardware */
+ #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
+ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */
+ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */
+ #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */
+ #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */
+ #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */
+ #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
+ #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
+ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
+
+
+ #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
+ #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
+ #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
+ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */
+ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */
+ #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */
+ #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */
+ #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */
+
+ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
+ #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */
+ #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */
+ #endif
+
+ #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+ #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */
+ #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */
+ #endif
+
+#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+ #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */
+ #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */
+ #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */
+ #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */
+ #define SysTick_BASE_NS
(SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for Cortex-M23 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for Cortex-M23 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + +#define __NVIC_SetPriorityGrouping(X) (void)(X) +#define __NVIC_GetPriorityGrouping() (0U) + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
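+
+ A minimal usage sketch; WWDG_IRQn stands for any device specific interrupt number
+ taken from the device header:
+ \code
+ NVIC_SetPriority(WWDG_IRQn, 1U);   // configure the priority before enabling
+ NVIC_EnableIRQ(WWDG_IRQn);         // set the enable bit in NVIC->ISER
+ \endcode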
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. 
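+
+ Usage sketch with a hypothetical device interrupt TIM1_IRQn; the value read back equals
+ the value written as long as it fits into the implemented __NVIC_PRIO_BITS:
+ \code
+ NVIC_SetPriority(TIM1_IRQn, 2U);
+ uint32_t prio = NVIC_GetPriority(TIM1_IRQn);   // yields 2U here
+ \endcode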
+ */
+__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
+{
+
+ if ((int32_t)(IRQn) >= 0)
+ {
+ return((uint32_t)(((NVIC->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+ }
+ else
+ {
+ return((uint32_t)(((SCB->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+ }
+}
+
+
+/**
+ \brief Encode Priority
+ \details Encodes the priority for an interrupt with the given priority group,
+ preemptive priority value, and subpriority value.
+ In case of a conflict between priority grouping and available
+ priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
+ \param [in] PriorityGroup Used priority group.
+ \param [in] PreemptPriority Preemptive priority value (starting from 0).
+ \param [in] SubPriority Subpriority value (starting from 0).
+ \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
+ */
+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
+{
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
+ uint32_t PreemptPriorityBits;
+ uint32_t SubPriorityBits;
+
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+ return (
+ ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
+ ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
+ );
+}
+
+
+/**
+ \brief Decode Priority
+ \details Decodes an interrupt priority value with a given priority group to
+ preemptive priority value and subpriority value.
+ In case of a conflict between priority grouping and available
+ priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
+ \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
+ \param [in] PriorityGroup Used priority group.
+ \param [out] pPreemptPriority Preemptive priority value (starting from 0).
+ \param [out] pSubPriority Subpriority value (starting from 0).
+ */
+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
+{
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
+ uint32_t PreemptPriorityBits;
+ uint32_t SubPriorityBits;
+
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+ *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
+ *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
+}
+
+
+/**
+ \brief Set Interrupt Vector
+ \details Sets an interrupt vector in the SRAM based interrupt vector table.
+ The interrupt number can be positive to specify a device specific interrupt,
+ or negative to specify a processor exception.
+ VTOR must have been relocated to SRAM before.
+ If VTOR is not present, address 0 must be mapped to SRAM.
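+
+ Usage sketch, assuming the vector table was copied to SRAM beforehand and that
+ UART0_IRQn and UART0_IRQHandler_RAM are hypothetical device specific names:
+ \code
+ NVIC_SetVector(UART0_IRQn, (uint32_t)UART0_IRQHandler_RAM);
+ // NVIC_GetVector(UART0_IRQn) now returns the address of UART0_IRQHandler_RAM
+ \endcode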
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ +#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) + uint32_t *vectors = (uint32_t *)SCB->VTOR; +#else + uint32_t *vectors = (uint32_t *)0x0U; +#endif + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. 
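+
+ Usage sketch (callable from Secure state only), with a hypothetical Non-secure
+ interrupt number UART0_IRQn:
+ \code
+ if (TZ_NVIC_GetPendingIRQ_NS(UART0_IRQn) != 0U)
+ {
+   TZ_NVIC_ClearPendingIRQ_NS(UART0_IRQn);   // discard the pending Non-secure request
+ }
+ \endcode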
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[_IP_IDX(IRQn)] = ((uint32_t)(NVIC_NS->IPR[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB_NS->SHPR[_SHP_IDX(IRQn)] = ((uint32_t)(SCB_NS->SHPR[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
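+
+ Usage sketch (callable from Secure state only); UART0_IRQn is a hypothetical interrupt
+ that has been targeted to the Non-secure state:
+ \code
+ TZ_NVIC_SetPriority_NS(UART0_IRQn, 1U);
+ uint32_t ns_prio = TZ_NVIC_GetPriority_NS(UART0_IRQn);   // reads the Non-secure banked priority
+ \endcode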
+ */
+__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn)
+{
+
+  if ((int32_t)(IRQn) >= 0)
+  {
+    return((uint32_t)(((NVIC_NS->IPR[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+  }
+  else
+  {
+    return((uint32_t)(((SCB_NS->SHPR[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
+  }
+}
+#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */
+
+/*@} end of CMSIS_Core_NVICFunctions */
+
+/* ########################## MPU functions #################################### */
+
+#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
+
+#include "mpu_armv8.h"
+
+#endif
+
+/* ########################## FPU functions #################################### */
+/**
+ \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_FpuFunctions FPU Functions
+ \brief Function that provides FPU type.
+ @{
+ */
+
+/**
+ \brief get FPU type
+ \details returns the FPU type
+ \returns
+ - \b 0: No FPU
+ - \b 1: Single precision FPU
+ - \b 2: Double + Single precision FPU
+ */
+__STATIC_INLINE uint32_t SCB_GetFPUType(void)
+{
+  return 0U; /* No FPU */
+}
+
+
+/*@} end of CMSIS_Core_FpuFunctions */
+
+
+
+/* ########################## SAU functions #################################### */
+/**
+ \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_SAUFunctions SAU Functions
+ \brief Functions that configure the SAU.
+ @{
+ */
+
+#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+
+/**
+ \brief Enable SAU
+ \details Enables the Security Attribution Unit (SAU).
+ */
+__STATIC_INLINE void TZ_SAU_Enable(void)
+{
+  SAU->CTRL |= (SAU_CTRL_ENABLE_Msk);
+}
+
+
+
+/**
+ \brief Disable SAU
+ \details Disables the Security Attribution Unit (SAU).
+ */
+__STATIC_INLINE void TZ_SAU_Disable(void)
+{
+  SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk);
+}
+
+#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */
+
+/*@} end of CMSIS_Core_SAUFunctions */
+
+
+
+
+/* ################################## Debug Control function ############################################ */
+/**
+ \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_DCBFunctions Debug Control Functions
+ \brief Functions that access the Debug Control Block.
+ @{
+ */
+
+
+/**
+ \brief Set Debug Authentication Control Register
+ \details writes to Debug Authentication Control register.
+ \param [in] value value to be written.
+ */
+__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value)
+{
+  __DSB();
+  __ISB();
+  DCB->DAUTHCTRL = value;
+  __DSB();
+  __ISB();
+}
+
+
+/**
+ \brief Get Debug Authentication Control Register
+ \details Reads Debug Authentication Control register.
+ \return Debug Authentication Control Register.
+ */
+__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void)
+{
+  return (DCB->DAUTHCTRL);
+}
+
+
+#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+/**
+ \brief Set Debug Authentication Control Register (non-secure)
+ \details writes to non-secure Debug Authentication Control register when in secure state.
+ \param [in] value value to be written
+ */
+__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value)
+{
+  __DSB();
+  __ISB();
+  DCB_NS->DAUTHCTRL = value;
+  __DSB();
+  __ISB();
+}
+
+
+/**
+ \brief Get Debug Authentication Control Register (non-secure)
+ \details Reads non-secure Debug Authentication Control register when in secure state.
+ \return Debug Authentication Control Register.
+ */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+
+ */
+__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks)
+{
+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+  {
+    return (1UL); /* Reload value impossible */
+  }
+
+  SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
+  TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+  SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */
+  SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
+                     SysTick_CTRL_TICKINT_Msk |
+                     SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
+  return (0UL); /* Function successful */
+}
+#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */
+
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_CM23_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm3.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm3.h
new file mode 100755
index 00000000000..24453a88633
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm3.h
@@ -0,0 +1,1943 @@
+/**************************************************************************//**
+ * @file core_cm3.h
+ * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File
+ * @version V5.1.1
+ * @date 27. March 2020
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header /* treat file as system include file */
+#endif
+
+#ifndef __CORE_CM3_H_GENERIC
+#define __CORE_CM3_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+ \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
+ CMSIS violates the following MISRA-C:2004 rules:
+
+ \li Required Rule 8.5, object/function definition in header file.<br>
+ Function definitions in header files are used to allow 'inlining'.
+
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers.
+
+ \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M3 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM3 definitions */ +#define __CM3_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM3_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM3_CMSIS_VERSION ((__CM3_CMSIS_VERSION_MAIN << 16U) | \ + __CM3_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (3U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM3_H_DEPENDANT +#define __CORE_CM3_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM3_REV + #define __CM3_REV 0x0200U + #warning "__CM3_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M3 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV < 0x0201U) /* core r2p1 */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#else +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ +#endif + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: 
PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask 
*/ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define 
SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 
1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +#else + uint32_t RESERVED1[1U]; +#endif +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ +#endif + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define 
ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define 
DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register 
Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
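+
+         A minimal configuration sketch, assuming a 32 KB region at the hypothetical
+         base address 0x20000000UL and using the MPU register masks defined below
+         (projects would normally use the ARM_MPU_* helpers from "mpu_armv7.h" instead):
+         \code
+         MPU->CTRL = 0U;                                      // disable the MPU while reconfiguring
+         MPU->RNR  = 0U;                                      // select region 0
+         MPU->RBAR = 0x20000000UL & MPU_RBAR_ADDR_Msk;        // region base, must be size-aligned
+         MPU->RASR = (0x6UL << MPU_RASR_AP_Pos)  |            // AP = 0b110: read-only
+                     (1UL  << MPU_RASR_XN_Pos)   |            // execute never
+                     (1UL  << MPU_RASR_C_Pos)    |            // normal, cacheable memory
+                     (14UL << MPU_RASR_SIZE_Pos) |            // SIZE = 14 -> 2^(14+1) = 32 KB
+                     MPU_RASR_ENABLE_Msk;                     // enable the region
+         MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;
+         __DSB();
+         __ISB();
+         \endcode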
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug 
DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: 
VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
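+
+         A minimal usage sketch; TIM2_IRQn stands in for whatever device-specific
+         interrupt number the device header defines, and the last line uses the
+         _FLD2VAL helper defined above to read the currently active exception number:
+         \code
+         NVIC_SetPriorityGrouping(3U);                           // PRIGROUP field, value 0..7
+         NVIC_SetPriority(TIM2_IRQn,
+             NVIC_EncodePriority(NVIC_GetPriorityGrouping(), 1U, 0U));
+         NVIC_EnableIRQ(TIM2_IRQn);
+
+         uint32_t active = _FLD2VAL(SCB_ICSR_VECTACTIVE, SCB->ICSR);
+         \endcode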
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. 
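+
+          For example (TIM2_IRQn again standing in for a device-specific interrupt number):
+          \code
+          if (NVIC_GetEnableIRQ(TIM2_IRQn) == 0U)
+          {
+            NVIC_EnableIRQ(TIM2_IRQn);       // enable it only when it is not enabled yet
+          }
+          \endcode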
+ \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
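+          A short sketch of both cases; USART1_IRQn stands in for a device-specific
+          interrupt number, PendSV_IRQn is the core exception number from the device header:
+          \code
+          NVIC_SetPriority(USART1_IRQn, 5U);                              // device interrupt -> NVIC->IP
+          NVIC_SetPriority(PendSV_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); // core exception  -> SCB->SHP
+          \endcode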
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. 
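+
+          For example, a 1 ms tick, assuming the device support code maintains the
+          CMSIS SystemCoreClock variable and the application provides a SysTick_Handler:
+          \code
+          if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
+          {
+            while (1) {}                     // reload value does not fit the 24-bit LOAD register
+          }
+          \endcode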
+ \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
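+
+          A polling sketch that echoes one pending character back over stimulus port 0,
+          assuming a debugger has enabled the ITM and the application defines
+          ITM_RxBuffer initialised to ITM_RXBUFFER_EMPTY:
+          \code
+          if (ITM_CheckChar() != 0)
+          {
+            ITM_SendChar((uint32_t)ITM_ReceiveChar());
+          }
+          \endcode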
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM3_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm33.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm33.h new file mode 100755 index 00000000000..13359be3ed0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm33.h @@ -0,0 +1,3264 @@ +/**************************************************************************//** + * @file core_cm33.h + * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File + * @version V5.2.0 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM33_H_GENERIC +#define __CORE_CM33_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.<br>
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.<br>
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M33 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM33 definitions */ +#define __CM33_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM33_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM33_CMSIS_VERSION ((__CM33_CMSIS_VERSION_MAIN << 16U) | \ + __CM33_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (33U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define 
__DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM33_H_DEPENDANT +#define __CORE_CM33_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM33_REV + #define __CM33_REV 0x0000U + #warning "__CM33_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M33 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
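+
+         Each 32-bit word of ISER/ICER/ISPR/ICPR/IABR/ITNS covers 32 interrupt lines,
+         so bit (n & 0x1F) of word (n >> 5) corresponds to interrupt number n.
+         A direct-register sketch equivalent to NVIC_SetPendingIRQ() for a hypothetical
+         interrupt number n:
+         \code
+         uint32_t n = 5U;                            // hypothetical external interrupt number
+         NVIC->ISPR[n >> 5U] = 1UL << (n & 0x1FUL);
+         \endcode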
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security 
Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define 
SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define 
SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << 
SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define 
SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk 
(0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2S Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1S Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define 
TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2S Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1S Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
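+
+/* Usage sketch (illustrative only): the MVFRx fields above are read-only feature
+   fields and are usually extracted with the _FLD2VAL() helper defined later in
+   this file, e.g.
+     uint32_t dp = _FLD2VAL(FPU_MVFR0_Double_precision, FPU->MVFR0);
+     uint32_t sp = _FLD2VAL(FPU_MVFR0_Single_precision, FPU->MVFR0);
+   SCB_GetFPUType() further below performs the same kind of query. */
+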
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
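+           The DCB provides the architectural view of the registers that the deprecated
+           CoreDebug structure above also maps. For example, trace components (DWT/ITM)
+           are commonly enabled through DEMCR (illustrative sketch only):
+             DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;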
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
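+           For example, the debug authentication state can be inspected through
+           DAUTHSTATUS (illustrative sketch only):
+             uint32_t sid = (DIB->DAUTHSTATUS & DIB_DAUTHSTATUS_SID_Msk) >> DIB_DAUTHSTATUS_SID_Pos;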
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
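+           A typical bring-up sequence combines the functions below. Illustrative
+           sketch only; UARTx_IRQn stands for a device-specific interrupt number
+           from the device header:
+             NVIC_SetPriorityGrouping(3U);
+             NVIC_SetPriority(UARTx_IRQn, 1U);
+             NVIC_EnableIRQ(UARTx_IRQn);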
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
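+   \note             For example, NVIC_SetPriorityGrouping(3U) places the group/sub-priority
+                     split at bit 4 of the 8-bit priority field; how many of those bits are
+                     implemented depends on __NVIC_PRIO_BITS (illustrative note).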
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
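+   \note    Typical use together with the pending query, e.g. (illustrative; UARTx_IRQn
+            is a placeholder device interrupt):
+              if (NVIC_GetPendingIRQ(UARTx_IRQn) != 0U) { NVIC_ClearPendingIRQ(UARTx_IRQn); }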
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
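+   \note    For example (illustrative; UARTx_IRQn is a placeholder device interrupt,
+            SVCall_IRQn comes from the device header):
+              NVIC_SetPriority(UARTx_IRQn, 2U);
+              NVIC_SetPriority(SVCall_IRQn, 0U);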
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
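+   \note    The TZ_NVIC_*_NS functions below mirror the regular NVIC API but act on the
+            non-secure NVIC aliases, e.g. (illustrative; UARTx_IRQn is a placeholder):
+              TZ_NVIC_SetPriority_NS(UARTx_IRQn, 2U);
+              TZ_NVIC_EnableIRQ_NS(UARTx_IRQn);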
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
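+          Usage sketch (illustrative only; assumes any SAU regions have already been
+          programmed by the secure start-up code before the unit is switched on):
+          \code
+          TZ_SAU_Enable();   // sets the ENABLE bit in SAU->CTRL
+          __DSB();
+          __ISB();           // make sure the new security attribution is in effect
+          \endcode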
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
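+           Usage sketch (illustrative only; assumes the device provides the usual
+           SystemCoreClock variable and that a 1 ms tick is wanted):
+           \code
+           if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
+           {
+             // requested reload value does not fit into the 24-bit LOAD register
+           }
+           \endcode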
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
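+          Retarget sketch (illustrative only; the hook name fputc and the FILE type
+          from <stdio.h> depend on the C library being retargeted):
+          \code
+          int fputc(int ch, FILE *f)
+          {
+            (void)f;
+            return (int)ITM_SendChar((uint32_t)ch);   // route printf output to ITM stimulus port 0
+          }
+          \endcode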
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM33_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm35p.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm35p.h new file mode 100755 index 00000000000..6a5f6ad1471 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm35p.h @@ -0,0 +1,3264 @@ +/**************************************************************************//** + * @file core_cm35p.h + * @brief CMSIS Cortex-M35P Core Peripheral Access Layer Header File + * @version V1.1.0 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM35P_H_GENERIC +#define __CORE_CM35P_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M35P + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM35P definitions */ +#define __CM35P_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM35P_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM35P_CMSIS_VERSION ((__CM35P_CMSIS_VERSION_MAIN << 16U) | \ + __CM35P_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (35U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + 
#define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM35P_H_DEPENDANT +#define __CORE_CM35P_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM35P_REV + #define __CM35P_REV 0x0000U + #warning "__CM35P_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M35P */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
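+        Register access sketch (illustrative only; n is a non-negative device specific
+        interrupt number and NVIC is the instance macro defined later in this header):
+        \code
+        NVIC->ISER[n >> 5UL] = 1UL << (n & 0x1FUL);   // set-enable interrupt n
+        NVIC->IPR[n]         = 0x80U;                 // one priority byte per interrupt
+        \endcode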
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security 
Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define 
SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define 
SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << 
SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define 
SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk 
(0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
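+        Register level sketch of a blocking delay (illustrative only; ticks must fit
+        the 24-bit reload field):
+        \code
+        SysTick->LOAD = ticks - 1UL;
+        SysTick->VAL  = 0UL;
+        SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;
+        while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0UL) { }  // wait one full period
+        SysTick->CTRL = 0UL;                                             // stop the timer again
+        \endcode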
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
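+        Stimulus port sketch (illustrative only; assumes the debug tooling has enabled
+        the ITM and stimulus port 1, and event_id is a placeholder for the value to trace):
+        \code
+        if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&   // ITM enabled
+            ((ITM->TER & (1UL << 1))          != 0UL))    // stimulus port #1 enabled
+        {
+          while (ITM->PORT[1].u32 == 0UL) { }             // wait until the port FIFO accepts data
+          ITM->PORT[1].u32 = event_id;                    // emit one 32-bit event word
+        }
+        \endcode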
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */
+#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */
+
+/* TPI Formatter and Flush Control Register Definitions */
+#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */
+#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */
+
+#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */
+#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */
+
+#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */
+#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */
+
+/* TPI TRIGGER Register Definitions */
+#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */
+#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */
+
+/* TPI Integration Test FIFO Test Data 0 Register Definitions */
+#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Position */
+#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */
+
+#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */
+#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */
+#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */
+#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte count Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */
+#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data2_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */
+#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */
+
+#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */
+#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */
+
+/* TPI Integration Test ATB Control Register 2 Register Definitions */
+#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */
+#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2S Mask */
+
+#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */
+#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1S Mask */
+
+#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */
+#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */
+
+#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */
+#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */
+
+/* TPI Integration Test FIFO Test Data 1 Register Definitions */
+#define TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */
+#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */
+#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */
+
+#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */
+#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */
+
+#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */
+#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte count Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */
+#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data2_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */
+#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */
+
+#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */
+#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */
+
+/* TPI Integration Test ATB Control Register 0 Definitions */
+#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */
+#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2S Mask */
+
+#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */
+#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1S Mask */
+
+#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */
+#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */
+
+#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */
+#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */
+
+/* TPI Integration Mode Control Register Definitions */
+#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */
+#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */
+
+/* TPI DEVID Register Definitions */
+#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */
+#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */
+
+#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */
+#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */
+
+#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */
+#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */
+
+#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */
+#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */
+
+#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */
+#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
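+
+/*
+  Usage sketch (illustrative, assuming the usual CMSIS "MPU" instance macro and the
+  __DSB()/__ISB() barrier intrinsics provided elsewhere in this header set): enabling
+  the MPU with the privileged default memory map as background region, using the
+  MPU_CTRL bit definitions above.
+
+    MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;  // enable MPU, keep default map for privileged code
+    __DSB();                                                    // ensure the new MPU configuration is seen
+    __ISB();                                                    // ...before the next instruction is fetched
+*/
+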
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
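+
+/*
+  Usage sketch (illustrative, assuming the usual CMSIS "FPU" instance macro defined
+  elsewhere in this header): querying the FPU feature set at run time from MVFR0,
+  using the field definitions above.
+
+    uint32_t mvfr0  = FPU->MVFR0;
+    uint32_t has_sp = (mvfr0 & FPU_MVFR0_Single_precision_Msk) >> FPU_MVFR0_Single_precision_Pos;  // non-zero: single precision supported
+    uint32_t has_dp = (mvfr0 & FPU_MVFR0_Double_precision_Msk) >> FPU_MVFR0_Double_precision_Pos;  // non-zero: double precision supported
+*/
+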
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U 
/*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ 
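+
+/*
+  Usage sketch (illustrative, assuming the usual CMSIS "CoreDebug" and "DWT" instance
+  macros defined elsewhere in this header): cycle-counting a code section, e.g. one
+  model inference, with the DWT cycle counter. TRCENA must be set before the DWT
+  counters run; code_to_measure() is a placeholder.
+
+    CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;  // enable DWT/ITM trace logic
+    DWT->CYCCNT = 0U;                                // reset the cycle counter
+    DWT->CTRL |= DWT_CTRL_CYCCNTENA_Msk;             // start counting
+    code_to_measure();                               // section to measure (placeholder)
+    uint32_t cycles = DWT->CYCCNT;                   // elapsed core cycles
+*/
+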
+#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define 
DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL 
<< DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
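+
+  \note  Editor's illustrative sketch, not part of the upstream CMSIS header: reading the debug
+         authentication status through this block, assuming the DIB instance and the _FLD2VAL()
+         helper that are defined further down in this file.
+  \code
+  uint32_t dauth = DIB->DAUTHSTATUS;                       // raw Debug Authentication Status value
+  uint32_t sid   = _FLD2VAL(DIB_DAUTHSTATUS_SID, dauth);   // extract the Secure Invasive Debug field
+  \endcode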
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base 
Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
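+
+  \note  Editor's usage sketch, not part of the upstream CMSIS header; it assumes a device that
+         implements four priority bits (__NVIC_PRIO_BITS == 4):
+  \code
+  NVIC_SetPriorityGrouping(3U);   // PRIGROUP = 3: all four implemented bits act as preempt priority, no sub-priority
+  \endcode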
+ */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. 
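+
+  \note  Editor's usage sketch, not part of the upstream CMSIS header; WWDG_IRQn stands in for any
+         non-negative device-specific interrupt number taken from the device header:
+  \code
+  NVIC_ClearPendingIRQ(WWDG_IRQn);   // drop a stale pending request ...
+  NVIC_EnableIRQ(WWDG_IRQn);         // ... before (re-)enabling the interrupt
+  \endcode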
+ \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
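+
+  \note  Editor's usage sketch, not part of the upstream CMSIS header, combining this function with
+         NVIC_GetPriorityGrouping() and NVIC_EncodePriority() defined elsewhere in this file:
+  \code
+  uint32_t grouping = NVIC_GetPriorityGrouping();
+  NVIC_SetPriority(SysTick_IRQn, NVIC_EncodePriority(grouping, 1UL, 0UL));   // preempt priority 1, sub-priority 0
+  \endcode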
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). 
+ */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. 
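+
+  \note  Editor's usage sketch, not part of the upstream CMSIS header: configuring a 1 ms tick,
+         assuming the usual SystemCoreClock variable provided by the device's system file.
+  \code
+  if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
+  {
+    // reload value out of range for the 24-bit counter: handle the error
+  }
+  \endcode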
+ @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
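+
+  \note  Editor's usage sketch, not part of the upstream CMSIS header; output appears only when the
+         debugger has enabled trace (DEMCR.TRCENA) and ITM stimulus port 0.
+  \code
+  ITM_SendChar((uint32_t)'A');   // emits one character via stimulus port 0 (simply returns if the ITM is disabled)
+  \endcode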
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM35P_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm4.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm4.h new file mode 100755 index 00000000000..4e0e8866970 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm4.h @@ -0,0 +1,2129 @@ +/**************************************************************************//** + * @file core_cm4.h + * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File + * @version V5.1.1 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM4_H_GENERIC +#define __CORE_CM4_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M4 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM4 definitions */ +#define __CM4_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM4_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM4_CMSIS_VERSION ((__CM4_CMSIS_VERSION_MAIN << 16U) | \ + __CM4_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (4U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM4_H_DEPENDANT +#define __CORE_CM4_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined 
__CHECK_DEVICE_DEFINES + #ifndef __CM4_REV + #define __CM4_REV 0x0000U + #warning "__CM4_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M4 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
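+
+  \note  Editor's illustrative sketch, not part of the upstream CMSIS header: the union offers a word
+         view and a bit-field view of the same register; __get_APSR() is assumed to be available from
+         the CMSIS compiler-specific headers.
+  \code
+  APSR_Type apsr;
+  apsr.w = __get_APSR();      // word access: read the whole register
+  if (apsr.b.N != 0U)         // bit access: negative condition code flag
+  {
+    // ...
+  }
+  \endcode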
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
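+
+  \note  Editor's illustrative sketch, not part of the upstream CMSIS header: ISER/ICER/ISPR/ICPR hold
+         one bit per interrupt, 32 interrupts per word, which is why the NVIC_* helper functions index
+         them as shown here (IRQn is assumed to be a non-negative device interrupt number):
+  \code
+  NVIC->ISER[((uint32_t)IRQn) >> 5UL] = 1UL << (((uint32_t)IRQn) & 0x1FUL);   // same effect as NVIC_EnableIRQ(IRQn)
+  \endcode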
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define 
SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define 
SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR 
Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define 
SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */ +#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */ + +#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */ +#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
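+
+  A minimal usage sketch, assuming "ticks" holds the desired period in clock cycles
+  (at most 0x00FFFFFF); this is similar to what the CMSIS SysTick_Config() helper does:
+  \code
+  SysTick->LOAD = ticks - 1UL;                         // set reload value
+  SysTick->VAL  = 0UL;                                 // clear current counter value
+  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |         // processor clock
+                  SysTick_CTRL_TICKINT_Msk   |         // enable SysTick exception request
+                  SysTick_CTRL_ENABLE_Msk;             // start the counter
+  \endcode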
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
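+
+  A minimal usage sketch for emitting one character over stimulus port 0, assuming "ch"
+  holds the character and that the debugger has enabled the ITM and stimulus port 0
+  (this is essentially what the CMSIS ITM_SendChar() helper does):
+  \code
+  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&      // ITM enabled
+      ((ITM->TER & 1UL)                != 0UL))        // stimulus port 0 enabled
+  {
+    while (ITM->PORT[0U].u32 == 0UL) { }               // wait until the port can accept data
+    ITM->PORT[0U].u8 = (uint8_t)ch;                    // write one byte
+  }
+  \endcode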
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define 
ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define 
DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register 
Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
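+
+  A minimal usage sketch for routing trace data to the SWO pin, assuming "trace_clk_hz" and
+  "swo_baud" hold the trace reference clock and the desired SWO bit rate (debug tools usually
+  program these registers themselves):
+  \code
+  TPI->SPPR = 2UL;                                     // TXMODE = 2: asynchronous SWO, NRZ encoding
+  TPI->ACPR = (trace_clk_hz / swo_baud) - 1UL;         // SWO prescaler
+  \endcode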
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
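+
+  A minimal usage sketch that programs region 0 as a 256 KiB, full-access, cacheable region
+  (0x20000000 is only an example base address; DSB/ISB barriers are omitted for brevity):
+  \code
+  MPU->CTRL = 0UL;                                                     // disable MPU while reconfiguring
+  MPU->RNR  = 0UL;                                                     // select region 0
+  MPU->RBAR = 0x20000000UL & MPU_RBAR_ADDR_Msk;                        // region base address
+  MPU->RASR = ((17UL << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) |      // SIZE = 17 -> 2^(17+1) = 256 KiB
+              ((3UL  << MPU_RASR_AP_Pos)   & MPU_RASR_AP_Msk)   |      // AP = 3 -> full access
+              MPU_RASR_C_Msk | MPU_RASR_ENABLE_Msk;                    // cacheable, region enabled
+  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;           // enable MPU, keep default map for privileged code
+  \endcode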
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< 
FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and FP Feature Register 2 Definitions */ + +#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group 
CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< 
CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. 
This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
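+            The sketch below shows how these functions are typically combined. It is
+            only an illustration: TIMER0_IRQn stands for any device-specific interrupt
+            number supplied by the device header, and the priority values are arbitrary.
+  \code
+  NVIC_SetPriorityGrouping(3U);                         /* example grouping value              */
+  NVIC_SetPriority(TIMER0_IRQn,
+                   NVIC_EncodePriority(3U, 1U, 0U));    /* example: preempt priority 1, sub 0  */
+  NVIC_EnableIRQ(TIMER0_IRQn);
+  \endcode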
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
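+           For example, with WDT_IRQn standing for any non-negative, device-specific
+           interrupt number (not defined in this file):
+  \code
+  NVIC_EnableIRQ(WDT_IRQn);      /* sets the corresponding bit in NVIC->ISER              */
+                                 /* negative (exception) numbers are ignored by this call */
+  \endcode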
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M4 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. 
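+           A minimal call sketch, assuming the device support code provides
+           SystemCoreClock and a 1 ms tick is wanted:
+           \code
+           SysTick_Config(SystemCoreClock / 1000U);     /* returns 0 on success */
+           \endcode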
+ Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
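+           A simple polling sketch using the two functions above (processing of the
+           received byte is left as a placeholder):
+  \code
+  if (ITM_CheckChar() != 0)
+  {
+    int32_t c = ITM_ReceiveChar();
+    /* ... process c ... */
+  }
+  \endcode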
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM4_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm55.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm55.h new file mode 100755 index 00000000000..6efaa3f8429 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm55.h @@ -0,0 +1,4215 @@ +/**************************************************************************//** + * @file core_cm55.h + * @brief CMSIS Cortex-M55 Core Peripheral Access Layer Header File + * @version V1.0.0 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM55_H_GENERIC +#define __CORE_CM55_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_CM55 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM55 definitions */ +#define __CM55_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM55_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM55_CMSIS_VERSION ((__CM55_CMSIS_VERSION_MAIN << 16U) | \ + __CM55_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (55U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + 
#define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM55_H_DEPENDANT +#define __CORE_CM55_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM55_REV + #define __CM55_REV 0x0000U + #warning "__CM55_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M55 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
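+         Application code normally reaches these registers through the NVIC pointer
+         that accompanies the core base-address definitions (as in the Cortex-M4
+         header earlier in this patch); raw access is shown here only as a sketch:
+  \code
+  NVIC->ISER[0] = (1UL << 3);    /* raw enable of IRQ 3; NVIC_EnableIRQ() is the portable call */
+  \endcode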
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED3[92U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define 
SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND 
Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ 
+#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): 
MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ 
+#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << 
SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: 
STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + 
__IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ 
+#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define 
DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt 
Position */ +#define TPI_FFCR_EnFmt_Msk (0x3UL /*<< TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: PSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 2U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL << TPI_DEVTYPE_SubType_Pos) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU).
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set
Mask */ + +#define PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U 
/*!< PMU CNTENSET: Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U 
/*!< PMU CNTENSET: Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ + +#define PMU_CNTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear
Mask */ + +#define PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << 
PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << 
PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ 
+#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL << PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ + +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define 
PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) 
/*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: 
Event Counter 1 Overflow Set Position */ +#define PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event 
Counter 16 Overflow Set Mask */ + +#define PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register 
Definitions */ + +#define PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 
15 Overflow Clear Position */ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ 
+#define PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) 
/*!< PMU SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU 
Control Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (8UL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (6UL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN 
Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
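+
+ A minimal usage sketch (illustrative only): enable trace support, which is a
+ common prerequisite for using the DWT and ITM, by setting the TRCENA bit in DEMCR:
+ \code
+ DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;   // enable DWT and ITM trace support
+ \endcode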
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
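+
+ A minimal usage sketch (illustrative only): read the Debug Authentication
+ Status register and extract the Secure Invasive Debug field with \ref _FLD2VAL:
+ \code
+ uint32_t sid = _FLD2VAL(DIB_DAUTHSTATUS_SID, DIB->DAUTHSTATUS);
+ \endcode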
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << 
DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */
+
+#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */
+#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */
+
+#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */
+#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */
+
+#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */
+#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */
+
+#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */
+#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */
+
+/* DDEVTYPE, SCS Device Type Register Definitions */
+#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */
+#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */
+
+#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */
+#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */
+
+
+/*@} end of group CMSIS_DIB */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_core_bitfield Core register bit field macros
+ \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+ @{
+ */
+
+/**
+ \brief Mask and shift a bit field value for use in a register bit range.
+ \param[in] field Name of the register bit field.
+ \param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type.
+ \return Masked and shifted value.
+*/
+#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+ \brief Mask and shift a register value to extract a bit field value.
+ \param[in] field Name of the register bit field.
+ \param[in] value Value of register. This parameter is interpreted as a uint32_t type.
+ \return Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
+
+
+/**
+ \ingroup CMSIS_core_register
+ \defgroup CMSIS_core_base Core Definitions
+ \brief Definitions for base addresses, unions, and structures.
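+
+ A minimal usage sketch (illustrative only): the core peripherals are accessed
+ through the struct pointers defined below, for example:
+ \code
+ uint32_t vtor = SCB->VTOR;        // vector table offset register
+ uint32_t tick = SysTick->VAL;     // current SysTick counter value
+ \endcode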
+ @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define 
CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point 
context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
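+
+ A minimal usage sketch (illustrative only); \c MyDevice_IRQn is a placeholder
+ for a device specific interrupt number from the device header:
+ \code
+ NVIC_DisableIRQ(MyDevice_IRQn);   // maps to __NVIC_DisableIRQ unless CMSIS_NVIC_VIRTUAL is defined
+ \endcode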
+ */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. 
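+
+ A minimal usage sketch (illustrative only); \c MyDevice_IRQn is a placeholder
+ for a device specific interrupt number:
+ \code
+ (void)NVIC_SetTargetState(MyDevice_IRQn);   // route the interrupt to Non-secure state
+ \endcode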
+ */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+ return (
+ ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
+ ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
+ );
+}
+
+
+/**
+ \brief Decode Priority
+ \details Decodes an interrupt priority value with a given priority group to
+ preemptive priority value and subpriority value.
+ In case of a conflict between priority grouping and available
+ priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
+ \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
+ \param [in] PriorityGroup Used priority group.
+ \param [out] pPreemptPriority Preemptive priority value (starting from 0).
+ \param [out] pSubPriority Subpriority value (starting from 0).
+ */
+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
+{
+ uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
+ uint32_t PreemptPriorityBits;
+ uint32_t SubPriorityBits;
+
+ PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
+ SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
+
+ *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
+ *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
+}
+
+
+/**
+ \brief Set Interrupt Vector
+ \details Sets an interrupt vector in the SRAM based interrupt vector table.
+ The interrupt number can be positive to specify a device specific interrupt,
+ or negative to specify a processor exception.
+ VTOR must have been relocated to SRAM before.
+ \param [in] IRQn Interrupt number
+ \param [in] vector Address of interrupt handler function
+ */
+__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
+{
+ uint32_t *vectors = (uint32_t *)SCB->VTOR;
+ vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;
+ __DSB();
+}
+
+
+/**
+ \brief Get Interrupt Vector
+ \details Reads an interrupt vector from the interrupt vector table.
+ The interrupt number can be positive to specify a device specific interrupt,
+ or negative to specify a processor exception.
+ \param [in] IRQn Interrupt number.
+ \return Address of interrupt handler function
+ */
+__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
+{
+ uint32_t *vectors = (uint32_t *)SCB->VTOR;
+ return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];
+}
+
+
+/**
+ \brief System Reset
+ \details Initiates a system reset request to reset the MCU.
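+
+ A minimal usage sketch (illustrative only); the surrounding fault handler is
+ an assumed application function, not part of this file:
+ \code
+ void HandleFatalError(void)
+ {
+   NVIC_SystemReset();             // does not return
+ }
+ \endcode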
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
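+
+ A minimal usage sketch (illustrative only); \c MyDevice_IRQn is a placeholder
+ and the call is assumed to execute in Secure state:
+ \code
+ TZ_NVIC_DisableIRQ_NS(MyDevice_IRQn);   // disable the interrupt in the Non-secure NVIC
+ \endcode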
+ */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +/** + \brief Cortex-M55 PMU events + \note Architectural PMU events can be found in pmu_armv8.h +*/ + +#define ARMCM55_PMU_ECC_ERR 0xC000 /*!< Any ECC error */ +#define ARMCM55_PMU_ECC_ERR_FATAL 0xC001 /*!< Any fatal ECC error */ +#define ARMCM55_PMU_ECC_ERR_DCACHE 0xC010 /*!< Any ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_ICACHE 0xC011 /*!< Any ECC error in the instruction cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DCACHE 0xC012 /*!< Any fatal ECC error in the data cache */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ICACHE 0xC013 /*!< Any fatal ECC error in the instruction cache*/ +#define ARMCM55_PMU_ECC_ERR_DTCM 0xC020 /*!< Any ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_ITCM 0xC021 /*!< Any ECC error in the ITCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_DTCM 0xC022 /*!< Any fatal ECC error in the DTCM */ +#define ARMCM55_PMU_ECC_ERR_FATAL_ITCM 0xC023 /*!< Any fatal ECC error in the ITCM */ +#define ARMCM55_PMU_PF_LINEFILL 0xC100 /*!< A prefetcher starts a line-fill */ +#define ARMCM55_PMU_PF_CANCEL 0xC101 /*!< A prefetcher stops prefetching */ +#define ARMCM55_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM55_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM55_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM55_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM55_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM55_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
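+
+ A minimal usage sketch (illustrative only):
+ \code
+ if (SCB_GetFPUType() == 0U)
+ {
+   // no hardware FPU available, use software floating point
+ }
+ \endcode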
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. 
+ \param [in] value value to be writen + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. 
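As a usage sketch (illustrative, not part of CMSIS): a 1 ms time base is normally created by passing SystemCoreClock / 1000 to SysTick_Config(); SystemCoreClock is the standard CMSIS global maintained by the device's system file, and msTicks is an assumed application counter.

/* Illustrative sketch only -- not part of CMSIS. */
static volatile uint32_t msTicks = 0U;            /* assumed application tick counter */

void SysTick_Handler(void)                        /* overrides the weak handler from the startup code */
{
  msTicks++;                                      /* one increment per millisecond */
}

static void timebase_init(void)                   /* hypothetical helper */
{
  if (SysTick_Config(SystemCoreClock / 1000U) != 0U)
  {
    while (1) { }                                 /* reload value out of range: stop here */
  }
}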
+ \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. 
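For reference, a minimal sketch (not part of CMSIS) that routes diagnostic text through stimulus port 0 with ITM_SendChar(); characters are only emitted once a debugger has enabled the ITM and the trace port.

/* Illustrative sketch only -- not part of CMSIS. */
static void itm_puts(const char *s)               /* hypothetical helper */
{
  while (*s != '\0')
  {
    (void)ITM_SendChar((uint32_t)*s++);           /* blocks only while the debugger drains port 0 */
  }
}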
+ */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM55_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm7.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm7.h new file mode 100755 index 00000000000..e1c31c275dc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_cm7.h @@ -0,0 +1,2362 @@ +/**************************************************************************//** + * @file core_cm7.h + * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File + * @version V5.1.2 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_CM7_H_GENERIC +#define __CORE_CM7_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M7 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM7 definitions */ +#define __CM7_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __CM7_CMSIS_VERSION_SUB ( __CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __CM7_CMSIS_VERSION ((__CM7_CMSIS_VERSION_MAIN << 16U) | \ + __CM7_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_M (7U) /*!< Cortex-M Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM7_H_DEPENDANT +#define __CORE_CM7_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined 
__CHECK_DEVICE_DEFINES + #ifndef __CM7_REV + #define __CM7_REV 0x0000U + #warning "__CM7_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M7 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). 
+ */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t FPCA:1; /*!< bit: 2 FP extension active flag */ + uint32_t _reserved0:29; /*!< bit: 3..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
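A short sketch (illustrative, not part of CMSIS) of how these union types are typically paired with the core register intrinsics from cmsis_compiler.h:

/* Illustrative sketch only -- not part of CMSIS. */
static int in_handler_mode(void)                  /* hypothetical helper */
{
  IPSR_Type ipsr;
  ipsr.w = __get_IPSR();                          /* active exception number, 0 in Thread mode */
  return (ipsr.b.ISR != 0U);
}

static int using_process_stack(void)              /* hypothetical helper */
{
  CONTROL_Type ctrl;
  ctrl.w = __get_CONTROL();
  return (ctrl.b.SPSEL != 0U);                    /* 1: PSP selected while in Thread mode */
}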
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED3[93U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP 
Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + uint32_t RESERVED7[6U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ + __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ + __IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */ + __IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */ + uint32_t RESERVED8[1U]; + __IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define 
SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: Branch prediction enable bit Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: Branch prediction enable bit Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: Instruction cache enable bit Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: Instruction cache enable bit Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: Cache enable bit Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: Cache enable bit Mask */ + +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB 
CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U 
/*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): 
PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << 
SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* Instruction Tightly-Coupled Memory Control Register Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk 
(0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */ +#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */ + +#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */ +#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */ +#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */ + +#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */ +#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* AHBP Control Register Definitions */ +#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */ +#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */ + +#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */ +#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ + +#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ +#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ + +/* AHBS Control Register Definitions */ +#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ + +#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ + +#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ + +/* Auxiliary Bus Fault Status Register Definitions */ +#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ +#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */ + +#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/ +#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */ + +#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/ +#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */ + +#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/ +#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */ + +#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/ +#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */ + +#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/ +#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask 
*/ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISDYNADD_Pos 26U /*!< ACTLR: DISDYNADD Position */ +#define SCnSCB_ACTLR_DISDYNADD_Msk (1UL << SCnSCB_ACTLR_DISDYNADD_Pos) /*!< ACTLR: DISDYNADD Mask */ + +#define SCnSCB_ACTLR_DISISSCH1_Pos 21U /*!< ACTLR: DISISSCH1 Position */ +#define SCnSCB_ACTLR_DISISSCH1_Msk (0x1FUL << SCnSCB_ACTLR_DISISSCH1_Pos) /*!< ACTLR: DISISSCH1 Mask */ + +#define SCnSCB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define SCnSCB_ACTLR_DISDI_Msk (0x1FUL << SCnSCB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (1UL << SCnSCB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define SCnSCB_ACTLR_DISBTACALLOC_Pos 14U /*!< ACTLR: DISBTACALLOC Position */ +#define SCnSCB_ACTLR_DISBTACALLOC_Msk (1UL << SCnSCB_ACTLR_DISBTACALLOC_Pos) /*!< ACTLR: DISBTACALLOC Mask */ + +#define SCnSCB_ACTLR_DISBTACREAD_Pos 13U /*!< ACTLR: DISBTACREAD Position */ +#define SCnSCB_ACTLR_DISBTACREAD_Msk (1UL << SCnSCB_ACTLR_DISBTACREAD_Pos) /*!< ACTLR: DISBTACREAD Mask */ + +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define SCnSCB_ACTLR_DISRAMODE_Pos 11U /*!< ACTLR: DISRAMODE Position */ +#define SCnSCB_ACTLR_DISRAMODE_Msk (1UL << SCnSCB_ACTLR_DISRAMODE_Pos) /*!< ACTLR: DISRAMODE Mask */ + +#define SCnSCB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define SCnSCB_ACTLR_FPEXCODIS_Msk (1UL << SCnSCB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
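As a usage sketch (not part of CMSIS): ICTR reports the implemented interrupt lines in granules of 32 and is reached through the SCnSCB instance defined further down in this header.

/* Illustrative sketch only -- not part of CMSIS. */
static uint32_t implemented_irq_lines(void)       /* hypothetical helper */
{
  /* INTLINESNUM encodes N such that up to 32*(N+1) external interrupts exist
     (the architecture caps the usable number at 496). */
  return 32U * ((SCnSCB->ICTR & SCnSCB_ICTR_INTLINESNUM_Msk) + 1U);
}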
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
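A polling sketch (illustrative, not part of CMSIS) that uses the bit definitions above directly, without the SysTick interrupt; it assumes the counter is clocked from the processor clock and that ticks is between 1 and 0x01000000.

/* Illustrative sketch only -- not part of CMSIS. */
static void systick_delay_cycles(uint32_t ticks)  /* hypothetical helper */
{
  SysTick->LOAD = (ticks - 1U) & SysTick_LOAD_RELOAD_Msk;
  SysTick->VAL  = 0U;                             /* also clears COUNTFLAG */
  SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;
  while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0U)
  {
    /* wait for one full count-down */
  }
  SysTick->CTRL = 0U;                             /* stop the counter */
}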
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define 
ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED3[981U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( W) Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define 
DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define 
DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
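          A minimal region-configuration sketch (not from the CMSIS header itself); the region
          number, base address and attribute values below are example assumptions only:
          \code
          MPU->CTRL = 0U;                                   // disable the MPU while (re)programming
          MPU->RNR  = 0U;                                   // select region 0
          MPU->RBAR = 0x20000000UL & MPU_RBAR_ADDR_Msk;     // region base, must be size-aligned
          MPU->RASR = (3UL  << MPU_RASR_AP_Pos)   |         // AP = 3: full access
                      (17UL << MPU_RASR_SIZE_Pos) |         // size = 2^(17+1) = 256 KB
                      MPU_RASR_ENABLE_Msk;
          MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk;
          __DSB();                                          // ensure the new settings take effect
          __ISB();
          \endcode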
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX 
Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif /* defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< 
FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and FP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and FP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + +#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and FP Feature Register 2 Definitions */ + +#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */ +#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */ + +/*@} end of group 
CMSIS_FPU */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< 
CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< CoreDebug DEMCR: VC_CORERESET Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. 
This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ +#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ +#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ +#define CoreDebug_BASE (0xE000EDF0UL) /*!< Core Debug Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ +#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ +#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ +#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ +#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE) /*!< Core Debug configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +#define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ +#define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ +#define EXC_RETURN_HANDLER_FPU (0xFFFFFFE1UL) /* return to Handler mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_MSP_FPU (0xFFFFFFE9UL) /* return to Thread mode, uses MSP after return, restore floating-point state */ +#define EXC_RETURN_THREAD_PSP_FPU (0xFFFFFFEDUL) /* return to Thread mode, uses PSP after return, restore floating-point state */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
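          Illustrative usage sketch (not from the CMSIS header itself); the interrupt name is an
          assumption, since IRQn_Type values such as USART1_IRQn come from the vendor device
          header (for example "stm32f746xx.h"), not from this file:
          \code
          NVIC_SetPriority(USART1_IRQn, 3U);   // smaller value = higher preemption priority
          NVIC_EnableIRQ(USART1_IRQn);         // sets the corresponding bit in NVIC->ISER
          \endcode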
+ */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. 
+ \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv7.h" + +#endif + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
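          Illustrative check sketch (not from the CMSIS header itself):
          \code
          if (SCB_GetFPUType() == 0U)
          {
            // no FPU implemented: build without hardware floating-point and avoid FP context use
          }
          \endcode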
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = SCB->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + +/*@} end of CMSIS_Core_FpuFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. 
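          A common usage sketch (not from the CMSIS header itself) is retargeting character
          output to ITM stimulus port 0; the __io_putchar hook name is an assumption that
          depends on the C library glue actually used by the project:
          \code
          int __io_putchar(int ch)
          {
            return (int)ITM_SendChar((uint32_t)ch);   // visible in the debugger's SWO/ITM console
          }
          \endcode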
+ */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM7_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc000.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc000.h new file mode 100755 index 00000000000..dbc755fff39 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc000.h @@ -0,0 +1,1030 @@ +/**************************************************************************//** + * @file core_sc000.h + * @brief CMSIS SC000 Core Peripheral Access Layer Header File + * @version V5.0.7 + * @date 27. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef __CORE_SC000_H_GENERIC +#define __CORE_SC000_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.<br>
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC000 definitions */ +#define __SC000_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC000_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC000_CMSIS_VERSION ((__SC000_CMSIS_VERSION_MAIN << 16U) | \ + __SC000_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (000U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC000_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC000_H_DEPENDANT +#define __CORE_SC000_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC000_REV + #define __SC000_REV 0x0000U + #warning "__SC000_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 0U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 2U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC000 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t _reserved0:1; /*!< bit: 0 Reserved */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). + */ +typedef struct +{ + __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[31U]; + __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[31U]; + __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[31U]; + __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[31U]; + uint32_t RESERVED4[64U]; + __IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */ +} NVIC_Type; + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). 
+ */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + uint32_t RESERVED1[154U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: 
TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 8U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0xFFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + 
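For reference, the MPU registers and bit masks above are what application code combines when it programs a protection region before enabling the MPU. The sketch below is illustrative only, not part of the CMSIS sources: it assumes the device header that pulls in core_sc000.h has already been included, the function name is made up for the example, the 4 KB size encoding (region size is 2^(SIZE+1) bytes), the read-only access permission (AP = 0b110) and the attribute bits are example values, and the SIZE/ENABLE masks as well as the MPU instance pointer are defined a little further down in this header.

/* Illustrative sketch: configure MPU region 0 as a 4 KB read-only region,
   then enable the MPU with the default memory map as privileged background. */
static void mpu_region0_readonly_4k_example(uint32_t base_addr)  /* base_addr must be 4 KB aligned */
{
  MPU->RNR  = 0U;                                           /* select region 0                */
  MPU->RBAR = base_addr & MPU_RBAR_ADDR_Msk;                 /* region base address            */
  MPU->RASR = (6UL  << MPU_RASR_AP_Pos)   |                  /* AP = 0b110: read-only access   */
              (1UL  << MPU_RASR_C_Pos)    |                  /* normal, cacheable memory       */
              (11UL << MPU_RASR_SIZE_Pos) |                  /* SIZE = 11 -> 2^(11+1) = 4 KB   */
              MPU_RASR_ENABLE_Msk;                           /* enable this region             */
  MPU->CTRL = MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_ENABLE_Msk; /* enable MPU, keep default map
                                                                for privileged accesses        */
  __DSB();                                                   /* ensure the new MPU settings    */
  __ISB();                                                   /* take effect before continuing  */
}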
+#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over DAP and not via processor. + Therefore they are not covered by the SC000 header file. + @{ + */ +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ +#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ +#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ +#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ +#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + +#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ +#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ +#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ +#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ +#endif + +/*@} */ + + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. 
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else +/*#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping not available for SC000 */ +/*#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping not available for SC000 */ + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ +/*#define NVIC_GetActive __NVIC_GetActive not available for SC000 */ + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/* Interrupt Priorities are WORD accessible only under Armv6-M */ +/* The following MACROS handle generation of the register offset and byte masks */ +#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL) +#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) ) +#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) ) + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } + else + { + SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) | + (((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn))); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. 
+ \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M0 and M0+ do not require the architectural barrier - assume SC000 is the same */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + SCB_AIRCR_SYSRESETREQ_Msk); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */
+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
+{
+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
+  {
+    return (1UL);                                                   /* Reload value impossible */
+  }
+
+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
+                   SysTick_CTRL_TICKINT_Msk   |
+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
+  return (0UL);                                                     /* Function successful */
+}
+
+#endif
+
+/*@} end of CMSIS_Core_SysTickFunctions */
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CORE_SC000_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc300.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc300.h
new file mode 100755
index 00000000000..e8914ba601d
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/core_sc300.h
@@ -0,0 +1,1917 @@
+/**************************************************************************//**
+ * @file     core_sc300.h
+ * @brief    CMSIS SC300 Core Peripheral Access Layer Header File
+ * @version  V5.0.9
+ * @date     27. March 2020
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2020 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if   defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef __CORE_SC300_H_GENERIC
+#define __CORE_SC300_H_GENERIC
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/**
+  \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions
+  CMSIS violates the following MISRA-C:2004 rules:
+
+   \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup SC3000 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS SC300 definitions */ +#define __SC300_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */ +#define __SC300_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */ +#define __SC300_CMSIS_VERSION ((__SC300_CMSIS_VERSION_MAIN << 16U) | \ + __SC300_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */ + +#define __CORTEX_SC (300U) /*!< Cortex secure core */ + +/** __FPU_USED indicates whether an FPU is used or not. + This core does not support an FPU at all +*/ +#define __FPU_USED 0U + +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #endif + +#endif + +#include "cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_SC300_H_DEPENDANT +#define __CORE_SC300_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __SC300_REV + #define __SC300_REV 0x0000U + #warning "__SC300_REV not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. 
+*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! Defines 'read / write' structure member permissions */ + +/*@} end of group SC300 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:27; /*!< bit: 0..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:1; /*!< bit: 9 Reserved */ + uint32_t ICI_IT_1:6; /*!< bit: 10..15 ICI/IT part 1 */ + uint32_t _reserved1:8; /*!< bit: 16..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit */ + uint32_t ICI_IT_2:2; /*!< bit: 25..26 ICI/IT part 2 */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_ICI_IT_2_Pos 25U /*!< xPSR: ICI/IT part 2 Position */ +#define xPSR_ICI_IT_2_Msk (3UL << xPSR_ICI_IT_2_Pos) /*!< xPSR: ICI/IT part 2 Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_ICI_IT_1_Pos 10U /*!< xPSR: ICI/IT part 1 Position */ +#define xPSR_ICI_IT_1_Msk (0x3FUL << xPSR_ICI_IT_1_Pos) /*!< xPSR: ICI/IT part 1 Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */ + uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[24U]; + __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RESERVED1[24U]; + __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[24U]; + __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[24U]; + __IOM uint32_t IABR[8U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[56U]; + __IOM uint8_t IP[240U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED5[644U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHP[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[5U]; + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + uint32_t RESERVED1[129U]; + __IOM uint32_t SFCR; /*!< Offset: 0x290 (R/W) Security Features Control Register */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ 
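As a usage note, the CPUID field macros above (and the remaining ones just below) are intended for decoding SCB->CPUID at run time, for example to confirm the implementer and part at start-up. The sketch below is illustrative only: the function name is made up, and the SCB instance pointer and the _FLD2VAL() helper it uses are defined further down in this header.

/* Illustrative sketch: decode the implementer and variant fields of CPUID. */
static void cpuid_fields_example(uint32_t *implementer, uint32_t *variant)
{
  uint32_t cpuid = SCB->CPUID;
  *implementer = _FLD2VAL(SCB_CPUID_IMPLEMENTER, cpuid);                    /* 0x41 ('A') indicates Arm */
  *variant     = (cpuid & SCB_CPUID_VARIANT_Msk) >> SCB_CPUID_VARIANT_Pos;  /* open-coded Pos/Msk form  */
}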
+ +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */ +#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLBASE_Pos 29U /*!< SCB VTOR: TBLBASE Position */ +#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */ + +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U 
/*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +#define SCB_AIRCR_VECTRESET_Pos 0U /*!< SCB AIRCR: VECTRESET Position */ +#define SCB_AIRCR_VECTRESET_Msk (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/) /*!< SCB AIRCR: VECTRESET Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */ +#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +#define SCB_CCR_NONBASETHRDENA_Pos 0U /*!< SCB CCR: NONBASETHRDENA Position */ +#define SCB_CCR_NONBASETHRDENA_Msk (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/) /*!< SCB CCR: NONBASETHRDENA Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << 
SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_STKERR_Pos 
(SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + 
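+#if 0  /* Illustrative usage sketch -- editorial addition, not part of the original ARM CMSIS header.
+          It shows how the SCB_HFSR and SCB_CFSR bit masks defined above can be used to classify a
+          fault from application code that includes this header. HardFault_Handler is the standard
+          Cortex-M exception handler name; the local variable names are hypothetical. */
+void HardFault_Handler(void)
+{
+  if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0UL)        /* HardFault escalated from a configurable fault */
+  {
+    uint32_t cfsr = SCB->CFSR;
+    if ((cfsr & SCB_CFSR_MMARVALID_Msk) != 0UL)        /* MemManage fault address register is valid */
+    {
+      uint32_t mem_fault_address = SCB->MMFAR;         /* inspect in a debugger */
+      (void)mem_fault_address;
+    }
+    if ((cfsr & SCB_CFSR_PRECISERR_Msk) != 0UL)        /* precise BusFault, BFAR holds the address */
+    {
+      uint32_t bus_fault_address = SCB->BFAR;
+      (void)bus_fault_address;
+    }
+    SCB->CFSR = cfsr;                                  /* CFSR bits are write-one-to-clear */
+  }
+  for (;;) { __NOP(); }                                /* halt here so the fault state can be examined */
+}
+#endif
+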
+#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ + +#define SCnSCB_ACTLR_DISDEFWBUF_Pos 1U /*!< ACTLR: DISDEFWBUF Position */ +#define SCnSCB_ACTLR_DISDEFWBUF_Msk (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos) /*!< ACTLR: DISDEFWBUF Mask */ + +#define SCnSCB_ACTLR_DISMCYCINT_Pos 0U /*!< ACTLR: DISMCYCINT Position */ +#define SCnSCB_ACTLR_DISMCYCINT_Msk (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/) /*!< ACTLR: DISMCYCINT Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[6U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TraceBusID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TraceBusID_Msk (0x7FUL << ITM_TCR_TraceBusID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPrescale_Pos 8U /*!< ITM TCR: TSPrescale Position */ +#define ITM_TCR_TSPrescale_Msk (3UL << ITM_TCR_TSPrescale_Pos) /*!< ITM TCR: TSPrescale Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 
0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + __IOM uint32_t MASK0; /*!< Offset: 0x024 (R/W) Mask Register 0 */ + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED0[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + __IOM uint32_t MASK1; /*!< Offset: 0x034 (R/W) Mask Register 1 */ + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + __IOM uint32_t MASK2; /*!< Offset: 0x044 (R/W) Mask Register 2 */ + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + __IOM uint32_t MASK3; /*!< Offset: 0x054 (R/W) Mask Register 3 */ + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL 
<< DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Mask Register Definitions */ +#define DWT_MASK_MASK_Pos 0U /*!< DWT MASK: MASK Position */ +#define DWT_MASK_MASK_Msk (0x1FUL /*<< DWT_MASK_MASK_Pos*/) /*!< DWT MASK: MASK Mask */ + +/* DWT Comparator Function Register Definitions */ +#define 
DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVADDR1_Pos 16U /*!< DWT FUNCTION: DATAVADDR1 Position */ +#define DWT_FUNCTION_DATAVADDR1_Msk (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos) /*!< DWT FUNCTION: DATAVADDR1 Mask */ + +#define DWT_FUNCTION_DATAVADDR0_Pos 12U /*!< DWT FUNCTION: DATAVADDR0 Position */ +#define DWT_FUNCTION_DATAVADDR0_Msk (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos) /*!< DWT FUNCTION: DATAVADDR0 Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_LNK1ENA_Pos 9U /*!< DWT FUNCTION: LNK1ENA Position */ +#define DWT_FUNCTION_LNK1ENA_Msk (0x1UL << DWT_FUNCTION_LNK1ENA_Pos) /*!< DWT FUNCTION: LNK1ENA Mask */ + +#define DWT_FUNCTION_DATAVMATCH_Pos 8U /*!< DWT FUNCTION: DATAVMATCH Position */ +#define DWT_FUNCTION_DATAVMATCH_Msk (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos) /*!< DWT FUNCTION: DATAVMATCH Mask */ + +#define DWT_FUNCTION_CYCMATCH_Pos 7U /*!< DWT FUNCTION: CYCMATCH Position */ +#define DWT_FUNCTION_CYCMATCH_Msk (0x1UL << DWT_FUNCTION_CYCMATCH_Pos) /*!< DWT FUNCTION: CYCMATCH Mask */ + +#define DWT_FUNCTION_EMITRANGE_Pos 5U /*!< DWT FUNCTION: EMITRANGE Position */ +#define DWT_FUNCTION_EMITRANGE_Msk (0x1UL << DWT_FUNCTION_EMITRANGE_Pos) /*!< DWT FUNCTION: EMITRANGE Mask */ + +#define DWT_FUNCTION_FUNCTION_Pos 0U /*!< DWT FUNCTION: FUNCTION Position */ +#define DWT_FUNCTION_FUNCTION_Msk (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/) /*!< DWT FUNCTION: FUNCTION Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). 
+ */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IM uint32_t FSCR; /*!< Offset: 0x308 (R/ ) Formatter Synchronization Counter Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t FIFO0; /*!< Offset: 0xEEC (R/ ) Integration ETM Data */ + __IM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/ ) ITATBCTR2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) ITATBCTR0 */ + __IM uint32_t FIFO1; /*!< Offset: 0xEFC (R/ ) Integration ITM Data */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) TPIU_DEVID */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) TPIU_DEVTYPE */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration ETM Data Register Definitions (FIFO0) */ +#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ +#define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) 
/*!< TPI FIFO0: ITM_ATVALID Mask */ + +#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ +#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ + +#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ +#define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ + +#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ +#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ + +#define TPI_FIFO0_ETM2_Pos 16U /*!< TPI FIFO0: ETM2 Position */ +#define TPI_FIFO0_ETM2_Msk (0xFFUL << TPI_FIFO0_ETM2_Pos) /*!< TPI FIFO0: ETM2 Mask */ + +#define TPI_FIFO0_ETM1_Pos 8U /*!< TPI FIFO0: ETM1 Position */ +#define TPI_FIFO0_ETM1_Msk (0xFFUL << TPI_FIFO0_ETM1_Pos) /*!< TPI FIFO0: ETM1 Mask */ + +#define TPI_FIFO0_ETM0_Pos 0U /*!< TPI FIFO0: ETM0 Position */ +#define TPI_FIFO0_ETM0_Msk (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/) /*!< TPI FIFO0: ETM0 Mask */ + +/* TPI ITATBCTR2 Register Definitions */ +#define TPI_ITATBCTR2_ATREADY2_Pos 0U /*!< TPI ITATBCTR2: ATREADY2 Position */ +#define TPI_ITATBCTR2_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2_Pos*/) /*!< TPI ITATBCTR2: ATREADY2 Mask */ + +#define TPI_ITATBCTR2_ATREADY1_Pos 0U /*!< TPI ITATBCTR2: ATREADY1 Position */ +#define TPI_ITATBCTR2_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1_Pos*/) /*!< TPI ITATBCTR2: ATREADY1 Mask */ + +/* TPI Integration ITM Data Register Definitions (FIFO1) */ +#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ +#define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ + +#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ +#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ + +#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ +#define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ + +#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ +#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ + +#define TPI_FIFO1_ITM2_Pos 16U /*!< TPI FIFO1: ITM2 Position */ +#define TPI_FIFO1_ITM2_Msk (0xFFUL << TPI_FIFO1_ITM2_Pos) /*!< TPI FIFO1: ITM2 Mask */ + +#define TPI_FIFO1_ITM1_Pos 8U /*!< TPI FIFO1: ITM1 Position */ +#define TPI_FIFO1_ITM1_Msk (0xFFUL << TPI_FIFO1_ITM1_Pos) /*!< TPI FIFO1: ITM1 Mask */ + +#define TPI_FIFO1_ITM0_Pos 0U /*!< TPI FIFO1: ITM0 Position */ +#define TPI_FIFO1_ITM0_Msk (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/) /*!< TPI FIFO1: ITM0 Mask */ + +/* TPI ITATBCTR0 Register Definitions */ +#define TPI_ITATBCTR0_ATREADY2_Pos 0U /*!< TPI ITATBCTR0: ATREADY2 Position */ +#define TPI_ITATBCTR0_ATREADY2_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2_Pos*/) /*!< TPI ITATBCTR0: ATREADY2 Mask */ + +#define TPI_ITATBCTR0_ATREADY1_Pos 0U /*!< TPI ITATBCTR0: ATREADY1 Position */ +#define TPI_ITATBCTR0_ATREADY1_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1_Pos*/) /*!< TPI ITATBCTR0: ATREADY1 Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define 
TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_MinBufSz_Pos 6U /*!< TPI DEVID: MinBufSz Position */ +#define TPI_DEVID_MinBufSz_Msk (0x7UL << TPI_DEVID_MinBufSz_Pos) /*!< TPI DEVID: MinBufSz Mask */ + +#define TPI_DEVID_AsynClkIn_Pos 5U /*!< TPI DEVID: AsynClkIn Position */ +#define TPI_DEVID_AsynClkIn_Msk (0x1UL << TPI_DEVID_AsynClkIn_Pos) /*!< TPI DEVID: AsynClkIn Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define TPI_DEVID_NrTraceInput_Msk (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). 
+ */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region RNRber Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RASR; /*!< Offset: 0x010 (R/W) MPU Region Attribute and Size Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Alias 1 Region Base Address Register */ + __IOM uint32_t RASR_A1; /*!< Offset: 0x018 (R/W) MPU Alias 1 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Alias 2 Region Base Address Register */ + __IOM uint32_t RASR_A2; /*!< Offset: 0x020 (R/W) MPU Alias 2 Region Attribute and Size Register */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Alias 3 Region Base Address Register */ + __IOM uint32_t RASR_A3; /*!< Offset: 0x028 (R/W) MPU Alias 3 Region Attribute and Size Register */ +} MPU_Type; + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_ADDR_Pos 5U /*!< MPU RBAR: ADDR Position */ +#define MPU_RBAR_ADDR_Msk (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos) /*!< MPU RBAR: ADDR Mask */ + +#define MPU_RBAR_VALID_Pos 4U /*!< MPU RBAR: VALID Position */ +#define MPU_RBAR_VALID_Msk (1UL << MPU_RBAR_VALID_Pos) /*!< MPU RBAR: VALID Mask */ + +#define MPU_RBAR_REGION_Pos 0U /*!< MPU RBAR: REGION Position */ +#define MPU_RBAR_REGION_Msk (0xFUL /*<< MPU_RBAR_REGION_Pos*/) /*!< MPU RBAR: REGION Mask */ + +/* MPU Region Attribute and Size Register Definitions */ +#define MPU_RASR_ATTRS_Pos 16U /*!< MPU RASR: MPU Region Attribute field Position */ +#define MPU_RASR_ATTRS_Msk (0xFFFFUL << MPU_RASR_ATTRS_Pos) /*!< MPU RASR: MPU Region Attribute field Mask */ + +#define MPU_RASR_XN_Pos 28U /*!< MPU RASR: ATTRS.XN Position */ +#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos) /*!< MPU RASR: ATTRS.XN Mask */ + +#define MPU_RASR_AP_Pos 24U /*!< MPU RASR: ATTRS.AP Position */ +#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos) /*!< MPU RASR: ATTRS.AP Mask */ + +#define MPU_RASR_TEX_Pos 19U /*!< MPU RASR: ATTRS.TEX Position */ +#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos) /*!< MPU RASR: ATTRS.TEX Mask */ + +#define 
MPU_RASR_S_Pos 18U /*!< MPU RASR: ATTRS.S Position */ +#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos) /*!< MPU RASR: ATTRS.S Mask */ + +#define MPU_RASR_C_Pos 17U /*!< MPU RASR: ATTRS.C Position */ +#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos) /*!< MPU RASR: ATTRS.C Mask */ + +#define MPU_RASR_B_Pos 16U /*!< MPU RASR: ATTRS.B Position */ +#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos) /*!< MPU RASR: ATTRS.B Mask */ + +#define MPU_RASR_SRD_Pos 8U /*!< MPU RASR: Sub-Region Disable Position */ +#define MPU_RASR_SRD_Msk (0xFFUL << MPU_RASR_SRD_Pos) /*!< MPU RASR: Sub-Region Disable Mask */ + +#define MPU_RASR_SIZE_Pos 1U /*!< MPU RASR: Region Size Field Position */ +#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos) /*!< MPU RASR: Region Size Field Mask */ + +#define MPU_RASR_ENABLE_Pos 0U /*!< MPU RASR: Region enable bit Position */ +#define MPU_RASR_ENABLE_Msk (1UL /*<< MPU_RASR_ENABLE_Pos*/) /*!< MPU RASR: Region enable bit Disable Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< CoreDebug DHCSR: S_SLEEP Position */ +#define CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< CoreDebug DHCSR: C_MASKINTS 
Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< CoreDebug DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< CoreDebug DEMCR: VC_MMERR Mask */ + 
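+#if 0  /* Illustrative usage sketch -- editorial addition, not part of the original ARM CMSIS header.
+          It shows the usual way to combine the CoreDebug DEMCR TRCENA bit (above) with the DWT
+          cycle counter definitions from this header for cycle-accurate timing, e.g. to measure one
+          inference of the MNIST model on the STM32F746. The helper names cycle_counter_start and
+          cycle_counter_read are hypothetical and used only for this illustration. */
+static inline void cycle_counter_start(void)
+{
+  CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;      /* enable the trace/debug blocks, including DWT */
+  DWT->CYCCNT       = 0UL;                             /* reset the cycle counter */
+  DWT->CTRL        |= DWT_CTRL_CYCCNTENA_Msk;          /* start counting CPU cycles */
+}
+
+static inline uint32_t cycle_counter_read(void)
+{
+  return DWT->CYCCNT;                                  /* CPU cycles elapsed since cycle_counter_start() */
+}
+#endif
+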
+#define CoreDebug_DEMCR_VC_CORERESET_Pos    0U                                            /*!< CoreDebug DEMCR: VC_CORERESET Position */
+#define CoreDebug_DEMCR_VC_CORERESET_Msk   (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/)  /*!< CoreDebug DEMCR: VC_CORERESET Mask */
+
+/*@} end of group CMSIS_CoreDebug */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_bitfield     Core register bit field macros
+  \brief      Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
+  @{
+ */
+
+/**
+  \brief   Mask and shift a bit field value for use in a register bit range.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the bit field. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted value.
+*/
+#define _VAL2FLD(field, value)    (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
+
+/**
+  \brief   Mask and shift a register value to extract a bit field value.
+  \param[in] field  Name of the register bit field.
+  \param[in] value  Value of the register. This parameter is interpreted as a uint32_t type.
+  \return           Masked and shifted bit field value.
+*/
+#define _FLD2VAL(field, value)    (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
+
+/*@} end of group CMSIS_core_bitfield */
+
+
+/**
+  \ingroup    CMSIS_core_register
+  \defgroup   CMSIS_core_base     Core Definitions
+  \brief      Definitions for base addresses, unions, and structures.
+  @{
+ */
+
+/* Memory mapping of Core Hardware */
+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address */
+#define ITM_BASE            (0xE0000000UL)                            /*!< ITM Base Address */
+#define DWT_BASE            (0xE0001000UL)                            /*!< DWT Base Address */
+#define TPI_BASE            (0xE0040000UL)                            /*!< TPI Base Address */
+#define CoreDebug_BASE      (0xE000EDF0UL)                            /*!< Core Debug Base Address */
+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address */
+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address */
+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address */
+
+#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )   /*!< System control Register not in SCB */
+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct */
+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct */
+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct */
+#define ITM                 ((ITM_Type       *)     ITM_BASE      )   /*!< ITM configuration struct */
+#define DWT                 ((DWT_Type       *)     DWT_BASE      )   /*!< DWT configuration struct */
+#define TPI                 ((TPI_Type       *)     TPI_BASE      )   /*!< TPI configuration struct */
+#define CoreDebug           ((CoreDebug_Type *)     CoreDebug_BASE)   /*!< Core Debug configuration struct */
+
+#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
+  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit */
+  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit */
+#endif
+
+/*@} */
+
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+  Core Function Interface contains:
+  - Core NVIC Functions
+  - Core SysTick Functions
+  - Core Debug Functions
+  - Core Register Access Functions
+ ******************************************************************************/
+/**
+  \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
+*/
+
+
+
+/* ##########################   NVIC functions  #################################### */
+/**
+  \ingroup  CMSIS_Core_FunctionInterface
+  \defgroup CMSIS_Core_NVICFunctions NVIC Functions
+  \brief    Functions that manage interrupts and exceptions via the NVIC.
+ @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* The following EXC_RETURN values are saved the LR on exception entry */ +#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */ +#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */ +#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */ + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. 
+ \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. 
+ */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IP[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IP[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHP[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? 
(uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + /* ARM Application Note 321 states that the M3 does not require the architectural barrier */ +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/*@} end of CMSIS_Core_NVICFunctions */ + + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + return 0U; /* No FPU */ +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. 
In this case, the file device.h + must contain a vendor-specific implementation of this function. + */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. + */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_SC300_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv7.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv7.h new file mode 100755 index 00000000000..791a8dae65a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv7.h @@ -0,0 +1,275 @@ +/****************************************************************************** + * @file mpu_armv7.h + * @brief CMSIS MPU API for Armv7-M MPU + * @version V5.1.1 + * @date 10. 
February 2020 + ******************************************************************************/ +/* + * Copyright (c) 2017-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV7_H +#define ARM_MPU_ARMV7_H + +#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) ///!< MPU Region Size 32 Bytes +#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) ///!< MPU Region Size 64 Bytes +#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) ///!< MPU Region Size 128 Bytes +#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) ///!< MPU Region Size 256 Bytes +#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) ///!< MPU Region Size 512 Bytes +#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) ///!< MPU Region Size 1 KByte +#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) ///!< MPU Region Size 2 KBytes +#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) ///!< MPU Region Size 4 KBytes +#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) ///!< MPU Region Size 8 KBytes +#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) ///!< MPU Region Size 16 KBytes +#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) ///!< MPU Region Size 32 KBytes +#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) ///!< MPU Region Size 64 KBytes +#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) ///!< MPU Region Size 128 KBytes +#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) ///!< MPU Region Size 256 KBytes +#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) ///!< MPU Region Size 512 KBytes +#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) ///!< MPU Region Size 1 MByte +#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) ///!< MPU Region Size 2 MBytes +#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) ///!< MPU Region Size 4 MBytes +#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) ///!< MPU Region Size 8 MBytes +#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) ///!< MPU Region Size 16 MBytes +#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) ///!< MPU Region Size 32 MBytes +#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) ///!< MPU Region Size 64 MBytes +#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) ///!< MPU Region Size 128 MBytes +#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) ///!< MPU Region Size 256 MBytes +#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) ///!< MPU Region Size 512 MBytes +#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) ///!< MPU Region Size 1 GByte +#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) ///!< MPU Region Size 2 GBytes +#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) ///!< MPU Region Size 4 GBytes + +#define ARM_MPU_AP_NONE 0U ///!< MPU Access Permission no access +#define ARM_MPU_AP_PRIV 1U ///!< MPU Access Permission privileged access only +#define ARM_MPU_AP_URO 2U ///!< MPU Access Permission 
unprivileged access read-only +#define ARM_MPU_AP_FULL 3U ///!< MPU Access Permission full access +#define ARM_MPU_AP_PRO 5U ///!< MPU Access Permission privileged access read-only +#define ARM_MPU_AP_RO 6U ///!< MPU Access Permission read-only access + +/** MPU Region Base Address Register Value +* +* \param Region The region to be configured, number 0 to 15. +* \param BaseAddress The base address for the region. +*/ +#define ARM_MPU_RBAR(Region, BaseAddress) \ + (((BaseAddress) & MPU_RBAR_ADDR_Msk) | \ + ((Region) & MPU_RBAR_REGION_Msk) | \ + (MPU_RBAR_VALID_Msk)) + +/** +* MPU Memory Access Attributes +* +* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. +* \param IsShareable Region is shareable between multiple bus masters. +* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. +* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. +*/ +#define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \ + ((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \ + (((IsShareable) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \ + (((IsCacheable) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \ + (((IsBufferable) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk)) + +/** +* MPU Region Attribute and Size Register Value +* +* \param DisableExec Instruction access disable bit, 1= disable instruction fetches. +* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. +* \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_. +* \param SubRegionDisable Sub-region disable field. +* \param Size Region size of the region to be configured, for example 4K, 8K. +*/ +#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \ + ((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \ + (((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \ + (((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \ + (((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \ + (((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \ + (((MPU_RASR_ENABLE_Msk)))) + +/** +* MPU Region Attribute and Size Register Value +* +* \param DisableExec Instruction access disable bit, 1= disable instruction fetches. +* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode. +* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral. +* \param IsShareable Region is shareable between multiple bus masters. +* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache. +* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy. +* \param SubRegionDisable Sub-region disable field. +* \param Size Region size of the region to be configured, for example 4K, 8K. +*/ +#define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \ + ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size) + +/** +* MPU Memory Access Attribute for strongly ordered memory. 
+* - TEX: 000b
+* - Shareable
+* - Non-cacheable
+* - Non-bufferable
+*/
+#define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U)
+
+/**
+* MPU Memory Access Attribute for device memory.
+* - TEX: 000b (if shareable) or 010b (if non-shareable)
+* - Shareable or non-shareable
+* - Non-cacheable
+* - Bufferable (if shareable) or non-bufferable (if non-shareable)
+*
+* \param IsShareable Configures the device memory as shareable or non-shareable.
+*/
+#define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U))
+
+/**
+* MPU Memory Access Attribute for normal memory.
+* - TEX: 1BBb (reflecting outer cacheability rules)
+* - Shareable or non-shareable
+* - Cacheable or non-cacheable (reflecting inner cacheability rules)
+* - Bufferable or non-bufferable (reflecting inner cacheability rules)
+*
+* \param OuterCp Configures the outer cache policy.
+* \param InnerCp Configures the inner cache policy.
+* \param IsShareable Configures the memory as shareable or non-shareable.
+*/
+#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) >> 1U), ((InnerCp) & 1U))
+
+/**
+* MPU Memory Access Attribute non-cacheable policy.
+*/
+#define ARM_MPU_CACHEP_NOCACHE 0U
+
+/**
+* MPU Memory Access Attribute write-back, write and read allocate policy.
+*/
+#define ARM_MPU_CACHEP_WB_WRA 1U
+
+/**
+* MPU Memory Access Attribute write-through, no write allocate policy.
+*/
+#define ARM_MPU_CACHEP_WT_NWA 2U
+
+/**
+* MPU Memory Access Attribute write-back, no write allocate policy.
+*/
+#define ARM_MPU_CACHEP_WB_NWA 3U
+
+
+/**
+* Struct for a single MPU Region
+*/
+typedef struct {
+  uint32_t RBAR; //!< The region base address register value (RBAR)
+  uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR
+} ARM_MPU_Region_t;
+
+/** Enable the MPU.
+* \param MPU_Control Default access permissions for unconfigured regions.
+*/
+__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
+{
+  __DMB();
+  MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
+#ifdef SCB_SHCSR_MEMFAULTENA_Msk
+  SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
+#endif
+  __DSB();
+  __ISB();
+}
+
+/** Disable the MPU.
+*/
+__STATIC_INLINE void ARM_MPU_Disable(void)
+{
+  __DMB();
+#ifdef SCB_SHCSR_MEMFAULTENA_Msk
+  SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
+#endif
+  MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk;
+  __DSB();
+  __ISB();
+}
+
+/** Clear and disable the given MPU region.
+* \param rnr Region number to be cleared.
+*/
+__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
+{
+  MPU->RNR = rnr;
+  MPU->RASR = 0U;
+}
+
+/** Configure an MPU region.
+* \param rbar Value for RBAR register.
+* \param rasr Value for RASR register.
+*/
+__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
+{
+  MPU->RBAR = rbar;
+  MPU->RASR = rasr;
+}
+
+/** Configure the given MPU region.
+* \param rnr Region number to be configured.
+* \param rbar Value for RBAR register.
+* \param rasr Value for RASR register.
+*/
+__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr)
+{
+  MPU->RNR = rnr;
+  MPU->RBAR = rbar;
+  MPU->RASR = rasr;
+}
+
+/** Memcopy with strictly ordered memory access, e.g. for register targets.
+* \param dst Destination data is copied to.
+* \param src Source data is copied from.
+* \param len Amount of data words to be copied.
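+* \par Usage sketch for the Armv7-M region macros above (illustrative only; added for this example
+*      project, not part of the upstream header). The SRAM base address 0x20000000 and the 64 KB
+*      size are assumptions for a typical Cortex-M device:
+* \code
+* ARM_MPU_Disable();
+* ARM_MPU_SetRegion(ARM_MPU_RBAR(0U, 0x20000000U),                      /* region 0 at the assumed SRAM base  */
+*                   ARM_MPU_RASR(0U, ARM_MPU_AP_FULL, 0U, 0U, 1U, 1U,   /* executable, full access, WB normal */
+*                                0x00U, ARM_MPU_REGION_SIZE_64KB));
+* ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);                              /* keep default map as background     */
+* \endcode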
+*/ +__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + while (cnt > MPU_TYPE_RALIASES) { + ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); + table += MPU_TYPE_RALIASES; + cnt -= MPU_TYPE_RALIASES; + } + ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); +} + +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv8.h new file mode 100755 index 00000000000..ef44ad01df0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/mpu_armv8.h @@ -0,0 +1,352 @@ +/****************************************************************************** + * @file mpu_armv8.h + * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU + * @version V5.1.2 + * @date 10. February 2020 + ******************************************************************************/ +/* + * Copyright (c) 2017-2020 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_MPU_ARMV8_H +#define ARM_MPU_ARMV8_H + +/** \brief Attribute for device memory (outer only) */ +#define ARM_MPU_ATTR_DEVICE ( 0U ) + +/** \brief Attribute for non-cacheable, normal memory */ +#define ARM_MPU_ATTR_NON_CACHEABLE ( 4U ) + +/** \brief Attribute for normal memory (outer and inner) +* \param NT Non-Transient: Set to 1 for non-transient data. +* \param WB Write-Back: Set to 1 to use write-back update policy. +* \param RA Read Allocation: Set to 1 to use cache allocation on read miss. +* \param WA Write Allocation: Set to 1 to use cache allocation on write miss. 
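+* \par Example encoding (illustrative only; added for this example project, not part of the upstream
+*      header): outer and inner write-back, read- and write-allocate normal memory, stored in
+*      attribute index 0 via ARM_MPU_ATTR and ARM_MPU_SetMemAttr, both defined further below:
+* \code
+* uint8_t attr = ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U),   /* outer: non-transient, WB, RA, WA */
+*                             ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U));  /* inner: same policy               */
+* ARM_MPU_SetMemAttr(0U, attr);
+* \endcode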
+*/
+#define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \
+  ((((NT) & 1U) << 3U) | (((WB) & 1U) << 2U) | (((RA) & 1U) << 1U) | ((WA) & 1U))
+
+/** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */
+#define ARM_MPU_ATTR_DEVICE_nGnRnE (0U)
+
+/** \brief Device memory type non Gathering, non Re-ordering, Early Write Acknowledgement */
+#define ARM_MPU_ATTR_DEVICE_nGnRE (1U)
+
+/** \brief Device memory type non Gathering, Re-ordering, Early Write Acknowledgement */
+#define ARM_MPU_ATTR_DEVICE_nGRE (2U)
+
+/** \brief Device memory type Gathering, Re-ordering, Early Write Acknowledgement */
+#define ARM_MPU_ATTR_DEVICE_GRE (3U)
+
+/** \brief Memory Attribute
+* \param O Outer memory attributes
+* \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes
+*/
+#define ARM_MPU_ATTR(O, I) ((((O) & 0xFU) << 4U) | ((((O) & 0xFU) != 0U) ? ((I) & 0xFU) : (((I) & 0x3U) << 2U)))
+
+/** \brief Normal memory non-shareable */
+#define ARM_MPU_SH_NON (0U)
+
+/** \brief Normal memory outer shareable */
+#define ARM_MPU_SH_OUTER (2U)
+
+/** \brief Normal memory inner shareable */
+#define ARM_MPU_SH_INNER (3U)
+
+/** \brief Memory access permissions
+* \param RO Read-Only: Set to 1 for read-only memory.
+* \param NP Non-Privileged: Set to 1 for non-privileged memory.
+*/
+#define ARM_MPU_AP_(RO, NP) ((((RO) & 1U) << 1U) | ((NP) & 1U))
+
+/** \brief Region Base Address Register value
+* \param BASE The base address bits [31:5] of a memory region. The value is zero extended. Effective address gets 32 byte aligned.
+* \param SH Defines the Shareability domain for this memory region.
+* \param RO Read-Only: Set to 1 for a read-only memory region.
+* \param NP Non-Privileged: Set to 1 for a non-privileged memory region.
+* \param XN eXecute Never: Set to 1 for a non-executable memory region.
+*/
+#define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \
+  (((BASE) & MPU_RBAR_BASE_Msk) | \
+  (((SH) << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \
+  ((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \
+  (((XN) << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk))
+
+/** \brief Region Limit Address Register value
+* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
+* \param IDX The attribute index to be associated with this memory region.
+*/
+#define ARM_MPU_RLAR(LIMIT, IDX) \
+  (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \
+  (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
+  (MPU_RLAR_EN_Msk))
+
+#if defined(MPU_RLAR_PXN_Pos)
+
+/** \brief Region Limit Address Register with PXN value
+* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
+* \param PXN Privileged execute never. Defines whether code can be executed from this privileged region.
+* \param IDX The attribute index to be associated with this memory region.
+*/
+#define ARM_MPU_RLAR_PXN(LIMIT, PXN, IDX) \
+  (((LIMIT) & MPU_RLAR_LIMIT_Msk) | \
+  (((PXN) << MPU_RLAR_PXN_Pos) & MPU_RLAR_PXN_Msk) | \
+  (((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
+  (MPU_RLAR_EN_Msk))
+
+#endif
+
+/**
+* Struct for a single MPU Region
+*/
+typedef struct {
+  uint32_t RBAR; /*!< Region Base Address Register value */
+  uint32_t RLAR; /*!< Region Limit Address Register value */
+} ARM_MPU_Region_t;
+
+/** Enable the MPU.
+* \param MPU_Control Default access permissions for unconfigured regions.
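+* \par Usage sketch (illustrative only; added for this example project, not part of the upstream header).
+*      MPU_CTRL_PRIVDEFENA_Msk keeps the architecturally defined default memory map as a background
+*      region for privileged accesses:
+* \code
+* ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
+* \endcode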
+*/ +__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) +{ + __DMB(); + MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif + __DSB(); + __ISB(); +} + +/** Disable the MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable(void) +{ + __DMB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); +} + +#ifdef MPU_NS +/** Enable the Non-secure MPU. +* \param MPU_Control Default access permissions for unconfigured regions. +*/ +__STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control) +{ + __DMB(); + MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; +#endif + __DSB(); + __ISB(); +} + +/** Disable the Non-secure MPU. +*/ +__STATIC_INLINE void ARM_MPU_Disable_NS(void) +{ + __DMB(); +#ifdef SCB_SHCSR_MEMFAULTENA_Msk + SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; +#endif + MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk; + __DSB(); + __ISB(); +} +#endif + +/** Set the memory attribute encoding to the given MPU. +* \param mpu Pointer to the MPU to be configured. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttrEx(MPU_Type* mpu, uint8_t idx, uint8_t attr) +{ + const uint8_t reg = idx / 4U; + const uint32_t pos = ((idx % 4U) * 8U); + const uint32_t mask = 0xFFU << pos; + + if (reg >= (sizeof(mpu->MAIR) / sizeof(mpu->MAIR[0]))) { + return; // invalid index + } + + mpu->MAIR[reg] = ((mpu->MAIR[reg] & ~mask) | ((attr << pos) & mask)); +} + +/** Set the memory attribute encoding. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU, idx, attr); +} + +#ifdef MPU_NS +/** Set the memory attribute encoding to the Non-secure MPU. +* \param idx The attribute index to be set [0-7] +* \param attr The attribute value to be set. +*/ +__STATIC_INLINE void ARM_MPU_SetMemAttr_NS(uint8_t idx, uint8_t attr) +{ + ARM_MPU_SetMemAttrEx(MPU_NS, idx, attr); +} +#endif + +/** Clear and disable the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegionEx(MPU_Type* mpu, uint32_t rnr) +{ + mpu->RNR = rnr; + mpu->RLAR = 0U; +} + +/** Clear and disable the given MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU, rnr); +} + +#ifdef MPU_NS +/** Clear and disable the given Non-secure MPU region. +* \param rnr Region number to be cleared. +*/ +__STATIC_INLINE void ARM_MPU_ClrRegion_NS(uint32_t rnr) +{ + ARM_MPU_ClrRegionEx(MPU_NS, rnr); +} +#endif + +/** Configure the given MPU region of the given MPU. +* \param mpu Pointer to MPU to be used. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegionEx(MPU_Type* mpu, uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + mpu->RNR = rnr; + mpu->RBAR = rbar; + mpu->RLAR = rlar; +} + +/** Configure the given MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. 
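+* \par Usage sketch (illustrative only; added for this example project, not part of the upstream header).
+*      Maps an assumed 64 KB SRAM window at 0x20000000 as non-shareable, read/write, executable normal
+*      memory using attribute index 0 (set beforehand with ARM_MPU_SetMemAttr):
+* \code
+* ARM_MPU_SetRegion(0UL,                                                       /* region number                       */
+*                   ARM_MPU_RBAR(0x20000000UL, ARM_MPU_SH_NON, 0UL, 1UL, 0UL), /* base, non-shareable, RW, NP=1, XN=0 */
+*                   ARM_MPU_RLAR(0x2000FFFFUL, 0UL));                          /* limit address, attribute index 0    */
+* ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
+* \endcode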
+*/ +__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU, rnr, rbar, rlar); +} + +#ifdef MPU_NS +/** Configure the given Non-secure MPU region. +* \param rnr Region number to be configured. +* \param rbar Value for RBAR register. +* \param rlar Value for RLAR register. +*/ +__STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t rlar) +{ + ARM_MPU_SetRegionEx(MPU_NS, rnr, rbar, rlar); +} +#endif + +/** Memcopy with strictly ordered memory access, e.g. for register targets. +* \param dst Destination data is copied to. +* \param src Source data is copied from. +* \param len Amount of data words to be copied. +*/ +__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) +{ + uint32_t i; + for (i = 0U; i < len; ++i) + { + dst[i] = src[i]; + } +} + +/** Load the given number of MPU regions from a table to the given MPU. +* \param mpu Pointer to the MPU registers to be used. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; + if (cnt == 1U) { + mpu->RNR = rnr; + ARM_MPU_OrderedMemcpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); + } else { + uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U); + uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES; + + mpu->RNR = rnrBase; + while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) { + uint32_t c = MPU_TYPE_RALIASES - rnrOffset; + ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); + table += c; + cnt -= c; + rnrOffset = 0U; + rnrBase += MPU_TYPE_RALIASES; + mpu->RNR = rnrBase; + } + + ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); + } +} + +/** Load the given number of MPU regions from a table. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU, rnr, table, cnt); +} + +#ifdef MPU_NS +/** Load the given number of MPU regions from a table to the Non-secure MPU. +* \param rnr First region number to be configured. +* \param table Pointer to the MPU configuration table. +* \param cnt Amount of regions to be configured. +*/ +__STATIC_INLINE void ARM_MPU_Load_NS(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt) +{ + ARM_MPU_LoadEx(MPU_NS, rnr, table, cnt); +} +#endif + +#endif + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/pmu_armv8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/pmu_armv8.h new file mode 100755 index 00000000000..dbd39d20c73 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/pmu_armv8.h @@ -0,0 +1,337 @@ +/****************************************************************************** + * @file pmu_armv8.h + * @brief CMSIS PMU API for Armv8.1-M PMU + * @version V1.0.0 + * @date 24. March 2020 + ******************************************************************************/ +/* + * Copyright (c) 2020 Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef ARM_PMU_ARMV8_H +#define ARM_PMU_ARMV8_H + +/** + * \brief PMU Events + * \note See the Armv8.1-M Architecture Reference Manual for full details on these PMU events. + * */ + +#define ARM_PMU_SW_INCR 0x0000 /*!< Software update to the PMU_SWINC register, architecturally executed and condition code check pass */ +#define ARM_PMU_L1I_CACHE_REFILL 0x0001 /*!< L1 I-Cache refill */ +#define ARM_PMU_L1D_CACHE_REFILL 0x0003 /*!< L1 D-Cache refill */ +#define ARM_PMU_L1D_CACHE 0x0004 /*!< L1 D-Cache access */ +#define ARM_PMU_LD_RETIRED 0x0006 /*!< Memory-reading instruction architecturally executed and condition code check pass */ +#define ARM_PMU_ST_RETIRED 0x0007 /*!< Memory-writing instruction architecturally executed and condition code check pass */ +#define ARM_PMU_INST_RETIRED 0x0008 /*!< Instruction architecturally executed */ +#define ARM_PMU_EXC_TAKEN 0x0009 /*!< Exception entry */ +#define ARM_PMU_EXC_RETURN 0x000A /*!< Exception return instruction architecturally executed and the condition code check pass */ +#define ARM_PMU_PC_WRITE_RETIRED 0x000C /*!< Software change to the Program Counter (PC). 
Instruction is architecturally executed and condition code check pass */ +#define ARM_PMU_BR_IMMED_RETIRED 0x000D /*!< Immediate branch architecturally executed */ +#define ARM_PMU_BR_RETURN_RETIRED 0x000E /*!< Function return instruction architecturally executed and the condition code check pass */ +#define ARM_PMU_UNALIGNED_LDST_RETIRED 0x000F /*!< Unaligned memory memory-reading or memory-writing instruction architecturally executed and condition code check pass */ +#define ARM_PMU_BR_MIS_PRED 0x0010 /*!< Mispredicted or not predicted branch speculatively executed */ +#define ARM_PMU_CPU_CYCLES 0x0011 /*!< Cycle */ +#define ARM_PMU_BR_PRED 0x0012 /*!< Predictable branch speculatively executed */ +#define ARM_PMU_MEM_ACCESS 0x0013 /*!< Data memory access */ +#define ARM_PMU_L1I_CACHE 0x0014 /*!< Level 1 instruction cache access */ +#define ARM_PMU_L1D_CACHE_WB 0x0015 /*!< Level 1 data cache write-back */ +#define ARM_PMU_L2D_CACHE 0x0016 /*!< Level 2 data cache access */ +#define ARM_PMU_L2D_CACHE_REFILL 0x0017 /*!< Level 2 data cache refill */ +#define ARM_PMU_L2D_CACHE_WB 0x0018 /*!< Level 2 data cache write-back */ +#define ARM_PMU_BUS_ACCESS 0x0019 /*!< Bus access */ +#define ARM_PMU_MEMORY_ERROR 0x001A /*!< Local memory error */ +#define ARM_PMU_INST_SPEC 0x001B /*!< Instruction speculatively executed */ +#define ARM_PMU_BUS_CYCLES 0x001D /*!< Bus cycles */ +#define ARM_PMU_CHAIN 0x001E /*!< For an odd numbered counter, increment when an overflow occurs on the preceding even-numbered counter on the same PE */ +#define ARM_PMU_L1D_CACHE_ALLOCATE 0x001F /*!< Level 1 data cache allocation without refill */ +#define ARM_PMU_L2D_CACHE_ALLOCATE 0x0020 /*!< Level 2 data cache allocation without refill */ +#define ARM_PMU_BR_RETIRED 0x0021 /*!< Branch instruction architecturally executed */ +#define ARM_PMU_BR_MIS_PRED_RETIRED 0x0022 /*!< Mispredicted branch instruction architecturally executed */ +#define ARM_PMU_STALL_FRONTEND 0x0023 /*!< No operation issued because of the frontend */ +#define ARM_PMU_STALL_BACKEND 0x0024 /*!< No operation issued because of the backend */ +#define ARM_PMU_L2I_CACHE 0x0027 /*!< Level 2 instruction cache access */ +#define ARM_PMU_L2I_CACHE_REFILL 0x0028 /*!< Level 2 instruction cache refill */ +#define ARM_PMU_L3D_CACHE_ALLOCATE 0x0029 /*!< Level 3 data cache allocation without refill */ +#define ARM_PMU_L3D_CACHE_REFILL 0x002A /*!< Level 3 data cache refill */ +#define ARM_PMU_L3D_CACHE 0x002B /*!< Level 3 data cache access */ +#define ARM_PMU_L3D_CACHE_WB 0x002C /*!< Level 3 data cache write-back */ +#define ARM_PMU_LL_CACHE_RD 0x0036 /*!< Last level data cache read */ +#define ARM_PMU_LL_CACHE_MISS_RD 0x0037 /*!< Last level data cache read miss */ +#define ARM_PMU_L1D_CACHE_MISS_RD 0x0039 /*!< Level 1 data cache read miss */ +#define ARM_PMU_OP_COMPLETE 0x003A /*!< Operation retired */ +#define ARM_PMU_OP_SPEC 0x003B /*!< Operation speculatively executed */ +#define ARM_PMU_STALL 0x003C /*!< Stall cycle for instruction or operation not sent for execution */ +#define ARM_PMU_STALL_OP_BACKEND 0x003D /*!< Stall cycle for instruction or operation not sent for execution due to pipeline backend */ +#define ARM_PMU_STALL_OP_FRONTEND 0x003E /*!< Stall cycle for instruction or operation not sent for execution due to pipeline frontend */ +#define ARM_PMU_STALL_OP 0x003F /*!< Instruction or operation slots not occupied each cycle */ +#define ARM_PMU_L1D_CACHE_RD 0x0040 /*!< Level 1 data cache read */ +#define ARM_PMU_LE_RETIRED 0x0100 /*!< Loop end instruction 
executed */ +#define ARM_PMU_LE_SPEC 0x0101 /*!< Loop end instruction speculatively executed */ +#define ARM_PMU_BF_RETIRED 0x0104 /*!< Branch future instruction architecturally executed and condition code check pass */ +#define ARM_PMU_BF_SPEC 0x0105 /*!< Branch future instruction speculatively executed and condition code check pass */ +#define ARM_PMU_LE_CANCEL 0x0108 /*!< Loop end instruction not taken */ +#define ARM_PMU_BF_CANCEL 0x0109 /*!< Branch future instruction not taken */ +#define ARM_PMU_SE_CALL_S 0x0114 /*!< Call to secure function, resulting in Security state change */ +#define ARM_PMU_SE_CALL_NS 0x0115 /*!< Call to non-secure function, resulting in Security state change */ +#define ARM_PMU_DWT_CMPMATCH0 0x0118 /*!< DWT comparator 0 match */ +#define ARM_PMU_DWT_CMPMATCH1 0x0119 /*!< DWT comparator 1 match */ +#define ARM_PMU_DWT_CMPMATCH2 0x011A /*!< DWT comparator 2 match */ +#define ARM_PMU_DWT_CMPMATCH3 0x011B /*!< DWT comparator 3 match */ +#define ARM_PMU_MVE_INST_RETIRED 0x0200 /*!< MVE instruction architecturally executed */ +#define ARM_PMU_MVE_INST_SPEC 0x0201 /*!< MVE instruction speculatively executed */ +#define ARM_PMU_MVE_FP_RETIRED 0x0204 /*!< MVE floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_SPEC 0x0205 /*!< MVE floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_HP_RETIRED 0x0208 /*!< MVE half-precision floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_HP_SPEC 0x0209 /*!< MVE half-precision floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_SP_RETIRED 0x020C /*!< MVE single-precision floating-point instruction architecturally executed */ +#define ARM_PMU_MVE_FP_SP_SPEC 0x020D /*!< MVE single-precision floating-point instruction speculatively executed */ +#define ARM_PMU_MVE_FP_MAC_RETIRED 0x0214 /*!< MVE floating-point multiply or multiply-accumulate instruction architecturally executed */ +#define ARM_PMU_MVE_FP_MAC_SPEC 0x0215 /*!< MVE floating-point multiply or multiply-accumulate instruction speculatively executed */ +#define ARM_PMU_MVE_INT_RETIRED 0x0224 /*!< MVE integer instruction architecturally executed */ +#define ARM_PMU_MVE_INT_SPEC 0x0225 /*!< MVE integer instruction speculatively executed */ +#define ARM_PMU_MVE_INT_MAC_RETIRED 0x0228 /*!< MVE multiply or multiply-accumulate instruction architecturally executed */ +#define ARM_PMU_MVE_INT_MAC_SPEC 0x0229 /*!< MVE multiply or multiply-accumulate instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_RETIRED 0x0238 /*!< MVE load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_SPEC 0x0239 /*!< MVE load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_RETIRED 0x023C /*!< MVE load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_SPEC 0x023D /*!< MVE load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_RETIRED 0x0240 /*!< MVE store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_SPEC 0x0241 /*!< MVE store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_CONTIG_RETIRED 0x0244 /*!< MVE contiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_CONTIG_SPEC 0x0245 /*!< MVE contiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_CONTIG_RETIRED 0x0248 /*!< MVE contiguous load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_CONTIG_SPEC 0x0249 /*!< MVE contiguous load instruction speculatively 
executed */ +#define ARM_PMU_MVE_ST_CONTIG_RETIRED 0x024C /*!< MVE contiguous store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_CONTIG_SPEC 0x024D /*!< MVE contiguous store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_NONCONTIG_RETIRED 0x0250 /*!< MVE non-contiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_NONCONTIG_SPEC 0x0251 /*!< MVE non-contiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_NONCONTIG_RETIRED 0x0254 /*!< MVE non-contiguous load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_NONCONTIG_SPEC 0x0255 /*!< MVE non-contiguous load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_NONCONTIG_RETIRED 0x0258 /*!< MVE non-contiguous store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_NONCONTIG_SPEC 0x0259 /*!< MVE non-contiguous store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_MULTI_RETIRED 0x025C /*!< MVE memory instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_LDST_MULTI_SPEC 0x025D /*!< MVE memory instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_LD_MULTI_RETIRED 0x0260 /*!< MVE memory load instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_LD_MULTI_SPEC 0x0261 /*!< MVE memory load instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_ST_MULTI_RETIRED 0x0261 /*!< MVE memory store instruction targeting multiple registers architecturally executed */ +#define ARM_PMU_MVE_ST_MULTI_SPEC 0x0265 /*!< MVE memory store instruction targeting multiple registers speculatively executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_RETIRED 0x028C /*!< MVE unaligned memory load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_SPEC 0x028D /*!< MVE unaligned memory load or store instruction speculatively executed */ +#define ARM_PMU_MVE_LD_UNALIGNED_RETIRED 0x0290 /*!< MVE unaligned load instruction architecturally executed */ +#define ARM_PMU_MVE_LD_UNALIGNED_SPEC 0x0291 /*!< MVE unaligned load instruction speculatively executed */ +#define ARM_PMU_MVE_ST_UNALIGNED_RETIRED 0x0294 /*!< MVE unaligned store instruction architecturally executed */ +#define ARM_PMU_MVE_ST_UNALIGNED_SPEC 0x0295 /*!< MVE unaligned store instruction speculatively executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_RETIRED 0x0298 /*!< MVE unaligned noncontiguous load or store instruction architecturally executed */ +#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_SPEC 0x0299 /*!< MVE unaligned noncontiguous load or store instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_RETIRED 0x02A0 /*!< MVE vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_SPEC 0x02A1 /*!< MVE vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_FP_RETIRED 0x02A4 /*!< MVE floating-point vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_FP_SPEC 0x02A5 /*!< MVE floating-point vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_VREDUCE_INT_RETIRED 0x02A8 /*!< MVE integer vector reduction instruction architecturally executed */ +#define ARM_PMU_MVE_VREDUCE_INT_SPEC 0x02A9 /*!< MVE integer vector reduction instruction speculatively executed */ +#define ARM_PMU_MVE_PRED 0x02B8 /*!< Cycles where one or more predicated beats architecturally executed */ +#define 
ARM_PMU_MVE_STALL 0x02CC /*!< Stall cycles caused by an MVE instruction */
+#define ARM_PMU_MVE_STALL_RESOURCE 0x02CD /*!< Stall cycles caused by an MVE instruction because of resource conflicts */
+#define ARM_PMU_MVE_STALL_RESOURCE_MEM 0x02CE /*!< Stall cycles caused by an MVE instruction because of memory resource conflicts */
+#define ARM_PMU_MVE_STALL_RESOURCE_FP 0x02CF /*!< Stall cycles caused by an MVE instruction because of floating-point resource conflicts */
+#define ARM_PMU_MVE_STALL_RESOURCE_INT 0x02D0 /*!< Stall cycles caused by an MVE instruction because of integer resource conflicts */
+#define ARM_PMU_MVE_STALL_BREAK 0x02D3 /*!< Stall cycles caused by an MVE chain break */
+#define ARM_PMU_MVE_STALL_DEPENDENCY 0x02D4 /*!< Stall cycles caused by MVE register dependency */
+#define ARM_PMU_ITCM_ACCESS 0x4007 /*!< Instruction TCM access */
+#define ARM_PMU_DTCM_ACCESS 0x4008 /*!< Data TCM access */
+#define ARM_PMU_TRCEXTOUT0 0x4010 /*!< ETM external output 0 */
+#define ARM_PMU_TRCEXTOUT1 0x4011 /*!< ETM external output 1 */
+#define ARM_PMU_TRCEXTOUT2 0x4012 /*!< ETM external output 2 */
+#define ARM_PMU_TRCEXTOUT3 0x4013 /*!< ETM external output 3 */
+#define ARM_PMU_CTI_TRIGOUT4 0x4018 /*!< Cross-trigger Interface output trigger 4 */
+#define ARM_PMU_CTI_TRIGOUT5 0x4019 /*!< Cross-trigger Interface output trigger 5 */
+#define ARM_PMU_CTI_TRIGOUT6 0x401A /*!< Cross-trigger Interface output trigger 6 */
+#define ARM_PMU_CTI_TRIGOUT7 0x401B /*!< Cross-trigger Interface output trigger 7 */
+
+/** \brief PMU Functions */
+
+__STATIC_INLINE void ARM_PMU_Enable(void);
+__STATIC_INLINE void ARM_PMU_Disable(void);
+
+__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type);
+
+__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void);
+__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void);
+
+__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask);
+__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask);
+
+__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void);
+__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num);
+
+__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void);
+__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask);
+
+__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask);
+__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask);
+
+__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask);
+
+/**
+  \brief Enable the PMU
+*/
+__STATIC_INLINE void ARM_PMU_Enable(void)
+{
+  PMU->CTRL |= PMU_CTRL_ENABLE_Msk;
+}
+
+/**
+  \brief Disable the PMU
+*/
+__STATIC_INLINE void ARM_PMU_Disable(void)
+{
+  PMU->CTRL &= ~PMU_CTRL_ENABLE_Msk;
+}
+
+/**
+  \brief Set event to count for PMU event counter
+  \param [in] num Event counter (0-30) to configure
+  \param [in] type Event to count
+*/
+__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type)
+{
+  PMU->EVTYPER[num] = type;
+}
+
+/**
+  \brief Reset cycle counter
+*/
+__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void)
+{
+  PMU->CTRL |= PMU_CTRL_CYCCNT_RESET_Msk;
+}
+
+/**
+  \brief Reset all event counters
+*/
+__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void)
+{
+  PMU->CTRL |= PMU_CTRL_EVENTCNT_RESET_Msk;
+}
+
+/**
+  \brief Enable counters
+  \param [in] mask Counters to enable
+  \note Enables one or more of the following:
+        - event counters (0-30)
+        - cycle counter
+*/
+__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask)
+{
+  PMU->CNTENSET = mask;
+}
+
+/**
+  \brief Disable counters
+  \param [in] mask Counters to disable
+  \note Disables one or more of the following:
+        - event counters (0-30)
- cycle counter +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask) +{ + PMU->CNTENCLR = mask; +} + +/** + \brief Read cycle counter + \return Cycle count +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void) +{ + return PMU->CCNTR; +} + +/** + \brief Read event counter + \param [in] num Event counter (0-30) to read + \return Event count +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num) +{ + return PMU->EVCNTR[num]; +} + +/** + \brief Read counter overflow status + \return Counter overflow status bits for the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void) +{ + return PMU->OVSSET; +} + +/** + \brief Clear counter overflow status + \param [in] mask Counter overflow status bits to clear + \note Clears overflow status bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask) +{ + PMU->OVSCLR = mask; +} + +/** + \brief Enable counter overflow interrupt request + \param [in] mask Counter overflow interrupt request bits to set + \note Sets overflow interrupt request bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask) +{ + PMU->INTENSET = mask; +} + +/** + \brief Disable counter overflow interrupt request + \param [in] mask Counter overflow interrupt request bits to clear + \note Clears overflow interrupt request bits for one or more of the following: + - event counters (0-30) + - cycle counter +*/ +__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask) +{ + PMU->INTENCLR = mask; +} + +/** + \brief Software increment event counter + \param [in] mask Counters to increment + \note Software increment bits for one or more event counters (0-30) +*/ +__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask) +{ + PMU->SWINC = mask; +} + +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/tz_context.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/tz_context.h new file mode 100755 index 00000000000..0d09749f3a5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/Core/Include/tz_context.h @@ -0,0 +1,70 @@ +/****************************************************************************** + * @file tz_context.h + * @brief Context Management for Armv8-M TrustZone + * @version V1.0.1 + * @date 10. January 2018 + ******************************************************************************/ +/* + * Copyright (c) 2017-2018 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#if defined ( __ICCARM__ )
+  #pragma system_include         /* treat file as system include file for MISRA check */
+#elif defined (__clang__)
+  #pragma clang system_header    /* treat file as system include file */
+#endif
+
+#ifndef TZ_CONTEXT_H
+#define TZ_CONTEXT_H
+
+#include <stdint.h>
+
+#ifndef TZ_MODULEID_T
+#define TZ_MODULEID_T
+/// \details Data type that identifies secure software modules called by a process.
+typedef uint32_t TZ_ModuleId_t;
+#endif
+
+/// \details TZ Memory ID identifies an allocated memory slot.
+typedef uint32_t TZ_MemoryId_t;
+
+/// Initialize secure context memory system
+/// \return execution status (1: success, 0: error)
+uint32_t TZ_InitContextSystem_S (void);
+
+/// Allocate context memory for calling secure software modules in TrustZone
+/// \param[in] module identifies software modules called from non-secure mode
+/// \return value != 0 id TrustZone memory slot identifier
+/// \return value 0 no memory available or internal error
+TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module);
+
+/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S
+/// \param[in] id TrustZone memory slot identifier
+/// \return execution status (1: success, 0: error)
+uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id);
+
+/// Load secure context (called on RTOS thread context switch)
+/// \param[in] id TrustZone memory slot identifier
+/// \return execution status (1: success, 0: error)
+uint32_t TZ_LoadContext_S (TZ_MemoryId_t id);
+
+/// Store secure context (called on RTOS thread context switch)
+/// \param[in] id TrustZone memory slot identifier
+/// \return execution status (1: success, 0: error)
+uint32_t TZ_StoreContext_S (TZ_MemoryId_t id);
+
+#endif // TZ_CONTEXT_H
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_common_tables.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_common_tables.h
new file mode 100755
index 00000000000..721b18dd2d4
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_common_tables.h
@@ -0,0 +1,517 @@
+/* ----------------------------------------------------------------------
+ * Project: CMSIS DSP Library
+ * Title: arm_common_tables.h
+ * Description: Extern declaration for common tables
+ *
+ * $Date: 27. January 2017
+ * $Revision: V.1.5.1
+ *
+ * Target Processor: Cortex-M cores
+ * -------------------------------------------------------------------- */
+/*
+ * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef _ARM_COMMON_TABLES_H +#define _ARM_COMMON_TABLES_H + +#include "arm_math.h" + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) + /* Double Precision Float CFFT twiddles */ + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024) + extern const uint16_t armBitRevTable[1024]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_16) + extern const uint64_t twiddleCoefF64_16[32]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_32) + extern const uint64_t twiddleCoefF64_32[64]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_64) + extern const uint64_t twiddleCoefF64_64[128]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_128) + extern const uint64_t twiddleCoefF64_128[256]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_256) + extern const uint64_t twiddleCoefF64_256[512]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_512) + extern const uint64_t twiddleCoefF64_512[1024]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_1024) + extern const uint64_t twiddleCoefF64_1024[2048]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_2048) + extern const uint64_t twiddleCoefF64_2048[4096]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F64_4096) + extern const uint64_t twiddleCoefF64_4096[8192]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_16) + extern const float32_t twiddleCoef_16[32]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_32) + extern const float32_t twiddleCoef_32[64]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_64) + extern const float32_t twiddleCoef_64[128]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_128) + extern const float32_t twiddleCoef_128[256]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if 
!defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_256) + extern const float32_t twiddleCoef_256[512]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_512) + extern const float32_t twiddleCoef_512[1024]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_1024) + extern const float32_t twiddleCoef_1024[2048]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_2048) + extern const float32_t twiddleCoef_2048[4096]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_4096) + extern const float32_t twiddleCoef_4096[8192]; + #define twiddleCoef twiddleCoef_4096 + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_16) + extern const q31_t twiddleCoef_16_q31[24]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_32) + extern const q31_t twiddleCoef_32_q31[48]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_64) + extern const q31_t twiddleCoef_64_q31[96]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_128) + extern const q31_t twiddleCoef_128_q31[192]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_256) + extern const q31_t twiddleCoef_256_q31[384]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_512) + extern const q31_t twiddleCoef_512_q31[768]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_1024) + extern const q31_t twiddleCoef_1024_q31[1536]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_2048) + extern const q31_t twiddleCoef_2048_q31[3072]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_4096) + extern const q31_t twiddleCoef_4096_q31[6144]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_16) + extern const q15_t twiddleCoef_16_q15[24]; + #endif /* 
!defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_32) + extern const q15_t twiddleCoef_32_q15[48]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_64) + extern const q15_t twiddleCoef_64_q15[96]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_128) + extern const q15_t twiddleCoef_128_q15[192]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_256) + extern const q15_t twiddleCoef_256_q15[384]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_512) + extern const q15_t twiddleCoef_512_q15[768]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_1024) + extern const q15_t twiddleCoef_1024_q15[1536]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_2048) + extern const q15_t twiddleCoef_2048_q15[3072]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_4096) + extern const q15_t twiddleCoef_4096_q15[6144]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + /* Double Precision Float RFFT twiddles */ + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_32) + extern const uint64_t twiddleCoefF64_rfft_32[32]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_64) + extern const uint64_t twiddleCoefF64_rfft_64[64]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_128) + extern const uint64_t twiddleCoefF64_rfft_128[128]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_256) + extern const uint64_t twiddleCoefF64_rfft_256[256]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_512) + extern const uint64_t twiddleCoefF64_rfft_512[512]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_1024) + extern const uint64_t twiddleCoefF64_rfft_1024[1024]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) 
|| defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_2048) + extern const uint64_t twiddleCoefF64_rfft_2048[2048]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F64_4096) + extern const uint64_t twiddleCoefF64_rfft_4096[4096]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_32) + extern const float32_t twiddleCoef_rfft_32[32]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_64) + extern const float32_t twiddleCoef_rfft_64[64]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_128) + extern const float32_t twiddleCoef_rfft_128[128]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_256) + extern const float32_t twiddleCoef_rfft_256[256]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_512) + extern const float32_t twiddleCoef_rfft_512[512]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_1024) + extern const float32_t twiddleCoef_rfft_1024[1024]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_2048) + extern const float32_t twiddleCoef_rfft_2048[2048]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_RFFT_F32_4096) + extern const float32_t twiddleCoef_rfft_4096[4096]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + + /* Double precision floating-point bit reversal tables */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_16) + #define ARMBITREVINDEXTABLEF64_16_TABLE_LENGTH ((uint16_t)12) + extern const uint16_t armBitRevIndexTableF64_16[ARMBITREVINDEXTABLEF64_16_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_32) + #define ARMBITREVINDEXTABLEF64_32_TABLE_LENGTH ((uint16_t)24) + extern const uint16_t armBitRevIndexTableF64_32[ARMBITREVINDEXTABLEF64_32_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_64) + #define ARMBITREVINDEXTABLEF64_64_TABLE_LENGTH ((uint16_t)56) + extern const uint16_t armBitRevIndexTableF64_64[ARMBITREVINDEXTABLEF64_64_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ 
+ + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_128) + #define ARMBITREVINDEXTABLEF64_128_TABLE_LENGTH ((uint16_t)112) + extern const uint16_t armBitRevIndexTableF64_128[ARMBITREVINDEXTABLEF64_128_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_256) + #define ARMBITREVINDEXTABLEF64_256_TABLE_LENGTH ((uint16_t)240) + extern const uint16_t armBitRevIndexTableF64_256[ARMBITREVINDEXTABLEF64_256_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_512) + #define ARMBITREVINDEXTABLEF64_512_TABLE_LENGTH ((uint16_t)480) + extern const uint16_t armBitRevIndexTableF64_512[ARMBITREVINDEXTABLEF64_512_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_1024) + #define ARMBITREVINDEXTABLEF64_1024_TABLE_LENGTH ((uint16_t)992) + extern const uint16_t armBitRevIndexTableF64_1024[ARMBITREVINDEXTABLEF64_1024_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_2048) + #define ARMBITREVINDEXTABLEF64_2048_TABLE_LENGTH ((uint16_t)1984) + extern const uint16_t armBitRevIndexTableF64_2048[ARMBITREVINDEXTABLEF64_2048_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT64_4096) + #define ARMBITREVINDEXTABLEF64_4096_TABLE_LENGTH ((uint16_t)4032) + extern const uint16_t armBitRevIndexTableF64_4096[ARMBITREVINDEXTABLEF64_4096_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + /* floating-point bit reversal tables */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_16) + #define ARMBITREVINDEXTABLE_16_TABLE_LENGTH ((uint16_t)20) + extern const uint16_t armBitRevIndexTable16[ARMBITREVINDEXTABLE_16_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_32) + #define ARMBITREVINDEXTABLE_32_TABLE_LENGTH ((uint16_t)48) + extern const uint16_t armBitRevIndexTable32[ARMBITREVINDEXTABLE_32_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_64) + #define ARMBITREVINDEXTABLE_64_TABLE_LENGTH ((uint16_t)56) + extern const uint16_t armBitRevIndexTable64[ARMBITREVINDEXTABLE_64_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_128) + #define ARMBITREVINDEXTABLE_128_TABLE_LENGTH ((uint16_t)208) + extern const uint16_t armBitRevIndexTable128[ARMBITREVINDEXTABLE_128_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || 
defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_256) + #define ARMBITREVINDEXTABLE_256_TABLE_LENGTH ((uint16_t)440) + extern const uint16_t armBitRevIndexTable256[ARMBITREVINDEXTABLE_256_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_512) + #define ARMBITREVINDEXTABLE_512_TABLE_LENGTH ((uint16_t)448) + extern const uint16_t armBitRevIndexTable512[ARMBITREVINDEXTABLE_512_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_1024) + #define ARMBITREVINDEXTABLE_1024_TABLE_LENGTH ((uint16_t)1800) + extern const uint16_t armBitRevIndexTable1024[ARMBITREVINDEXTABLE_1024_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_2048) + #define ARMBITREVINDEXTABLE_2048_TABLE_LENGTH ((uint16_t)3808) + extern const uint16_t armBitRevIndexTable2048[ARMBITREVINDEXTABLE_2048_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FLT_4096) + #define ARMBITREVINDEXTABLE_4096_TABLE_LENGTH ((uint16_t)4032) + extern const uint16_t armBitRevIndexTable4096[ARMBITREVINDEXTABLE_4096_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + + /* fixed-point bit reversal tables */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_16) + #define ARMBITREVINDEXTABLE_FIXED_16_TABLE_LENGTH ((uint16_t)12) + extern const uint16_t armBitRevIndexTable_fixed_16[ARMBITREVINDEXTABLE_FIXED_16_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_32) + #define ARMBITREVINDEXTABLE_FIXED_32_TABLE_LENGTH ((uint16_t)24) + extern const uint16_t armBitRevIndexTable_fixed_32[ARMBITREVINDEXTABLE_FIXED_32_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_64) + #define ARMBITREVINDEXTABLE_FIXED_64_TABLE_LENGTH ((uint16_t)56) + extern const uint16_t armBitRevIndexTable_fixed_64[ARMBITREVINDEXTABLE_FIXED_64_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_128) + #define ARMBITREVINDEXTABLE_FIXED_128_TABLE_LENGTH ((uint16_t)112) + extern const uint16_t armBitRevIndexTable_fixed_128[ARMBITREVINDEXTABLE_FIXED_128_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_256) + #define ARMBITREVINDEXTABLE_FIXED_256_TABLE_LENGTH ((uint16_t)240) + extern const uint16_t armBitRevIndexTable_fixed_256[ARMBITREVINDEXTABLE_FIXED_256_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || 
defined(ARM_TABLE_BITREVIDX_FXT_512) + #define ARMBITREVINDEXTABLE_FIXED_512_TABLE_LENGTH ((uint16_t)480) + extern const uint16_t armBitRevIndexTable_fixed_512[ARMBITREVINDEXTABLE_FIXED_512_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_1024) + #define ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH ((uint16_t)992) + extern const uint16_t armBitRevIndexTable_fixed_1024[ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_2048) + #define ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH ((uint16_t)1984) + extern const uint16_t armBitRevIndexTable_fixed_2048[ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096) + #define ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH ((uint16_t)4032) + extern const uint16_t armBitRevIndexTable_fixed_4096[ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_F32) + extern const float32_t realCoefA[8192]; + extern const float32_t realCoefB[8192]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_Q31) + extern const q31_t realCoefAQ31[8192]; + extern const q31_t realCoefBQ31[8192]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_REALCOEF_Q15) + extern const q15_t realCoefAQ15[8192]; + extern const q15_t realCoefBQ15[8192]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_128) + extern const float32_t Weights_128[256]; + extern const float32_t cos_factors_128[128]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_512) + extern const float32_t Weights_512[1024]; + extern const float32_t cos_factors_512[512]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_2048) + extern const float32_t Weights_2048[4096]; + extern const float32_t cos_factors_2048[2048]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_F32_8192) + extern const float32_t Weights_8192[16384]; + extern const float32_t cos_factors_8192[8192]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_128) + extern const q15_t WeightsQ15_128[256]; + extern const q15_t cos_factorsQ15_128[128]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_512) + extern const q15_t WeightsQ15_512[1024]; + extern const q15_t cos_factorsQ15_512[512]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_2048) + extern const q15_t WeightsQ15_2048[4096]; + extern const q15_t cos_factorsQ15_2048[2048]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q15_8192) + extern const q15_t 
WeightsQ15_8192[16384]; + extern const q15_t cos_factorsQ15_8192[8192]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_128) + extern const q31_t WeightsQ31_128[256]; + extern const q31_t cos_factorsQ31_128[128]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_512) + extern const q31_t WeightsQ31_512[1024]; + extern const q31_t cos_factorsQ31_512[512]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_2048) + extern const q31_t WeightsQ31_2048[4096]; + extern const q31_t cos_factorsQ31_2048[2048]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_DCT4_Q31_8192) + extern const q31_t WeightsQ31_8192[16384]; + extern const q31_t cos_factorsQ31_8192[8192]; + #endif + +#endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_TABLES) */ + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_ALLOW_TABLES) + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_RECIP_Q15) + extern const q15_t armRecipTableQ15[64]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_RECIP_Q31) + extern const q31_t armRecipTableQ31[64]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + + /* Tables for Fast Math Sine and Cosine */ + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_F32) + extern const float32_t sinTable_f32[FAST_MATH_TABLE_SIZE + 1]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_Q31) + extern const q31_t sinTable_q31[FAST_MATH_TABLE_SIZE + 1]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SIN_Q15) + extern const q15_t sinTable_q15[FAST_MATH_TABLE_SIZE + 1]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + + #if defined(ARM_MATH_MVEI) + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE) + extern const q31_t sqrtTable_Q31[256]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + #endif + + #if defined(ARM_MATH_MVEI) + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE) + extern const q15_t sqrtTable_Q15[256]; + #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + #endif + +#endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_TABLES) */ + +#if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) + extern const float32_t exp_tab[8]; + extern const float32_t __logf_lut_f32[8]; +#endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) */ + +#if (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) +extern const unsigned char hwLUT[256]; +#endif /* (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) */ + +#endif /* ARM_COMMON_TABLES_H */ + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_const_structs.h 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_const_structs.h new file mode 100755 index 00000000000..83984c40cd0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_const_structs.h @@ -0,0 +1,76 @@ +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_const_structs.h + * Description: Constant structs that are initialized for user convenience. + * For example, some can be given as arguments to the arm_cfft_f32() function. + * + * $Date: 27. January 2017 + * $Revision: V.1.5.1 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _ARM_CONST_STRUCTS_H +#define _ARM_CONST_STRUCTS_H + +#include "arm_math.h" +#include "arm_common_tables.h" + + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len16; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len32; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len64; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len128; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len256; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len512; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len1024; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len2048; + extern const arm_cfft_instance_f64 arm_cfft_sR_f64_len4096; + + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len16; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len32; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len64; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len128; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len256; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len512; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len1024; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len2048; + extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len4096; + + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len16; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len32; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len64; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len128; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len256; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len512; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len1024; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len2048; + extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len4096; + + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len16; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len32; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len64; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len128; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len256; + 
extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len512; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len1024; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len2048; + extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len4096; + +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_helium_utils.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_helium_utils.h new file mode 100755 index 00000000000..7609d329f08 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_helium_utils.h @@ -0,0 +1,348 @@ +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_helium_utils.h + * Description: Utility functions for Helium development + * + * $Date: 09. September 2019 + * $Revision: V.1.5.1 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _ARM_UTILS_HELIUM_H_ +#define _ARM_UTILS_HELIUM_H_ + +/*************************************** + +Definitions available for MVEF and MVEI + +***************************************/ +#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) + +#define INACTIVELANE 0 /* inactive lane content */ + + +#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */ + +/*************************************** + +Definitions available for MVEF only + +***************************************/ +#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) + +__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in) +{ + float32_t acc; + + acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) + + vgetq_lane(in, 2) + vgetq_lane(in, 3); + + return acc; +} + +/* newton initial guess */ +#define INVSQRT_MAGIC_F32 0x5f3759df + +#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)\ +{ \ + float32x4_t tmp; \ + \ + /* tmp = xhalf * x * x */ \ + tmp = vmulq(xStart, xStart); \ + tmp = vmulq(tmp, xHalf); \ + /* (1.5f - xhalf * x * x) */ \ + tmp = vsubq(vdupq_n_f32(1.5f), tmp); \ + /* x = x*(1.5f-xhalf*x*x); */ \ + invSqrt = vmulq(tmp, xStart); \ +} +#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */ + +/*************************************** + +Definitions available for MVEI only + +***************************************/ +#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) + + +#include "arm_common_tables.h" + +/* Following functions are used to transpose matrix in f32 and q31 cases */ +__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve( + uint32_t * pDataSrc, + uint32_t * pDataDest) +{ + static const uint32x4_t vecOffs = { 0, 2, 1, 3 }; + /* + * + * | 0 1 | => | 0 2 | + * | 2 3 | | 1 3 | + * + */ + 
uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc); + vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn); + + return (ARM_MATH_SUCCESS); +} + +__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve( + uint32_t * pDataSrc, + uint32_t * pDataDest) +{ + const uint32x4_t vecOffs1 = { 0, 3, 6, 1}; + const uint32x4_t vecOffs2 = { 4, 7, 2, 5}; + /* + * + * | 0 1 2 | | 0 3 6 | 4 x 32 flattened version | 0 3 6 1 | + * | 3 4 5 | => | 1 4 7 | => | 4 7 2 5 | + * | 6 7 8 | | 2 5 8 | (row major) | 8 . . . | + * + */ + uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc); + uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]); + + vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1); + vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2); + + pDataDest[8] = pDataSrc[8]; + + return (ARM_MATH_SUCCESS); +} + +__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest) +{ + /* + * 4x4 Matrix transposition + * is 4 x de-interleave operation + * + * 0 1 2 3 0 4 8 12 + * 4 5 6 7 1 5 9 13 + * 8 9 10 11 2 6 10 14 + * 12 13 14 15 3 7 11 15 + */ + + uint32x4x4_t vecIn; + + vecIn = vld4q((uint32_t const *) pDataSrc); + vstrwq(pDataDest, vecIn.val[0]); + pDataDest += 4; + vstrwq(pDataDest, vecIn.val[1]); + pDataDest += 4; + vstrwq(pDataDest, vecIn.val[2]); + pDataDest += 4; + vstrwq(pDataDest, vecIn.val[3]); + + return (ARM_MATH_SUCCESS); +} + + +__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve( + uint16_t srcRows, + uint16_t srcCols, + uint32_t * pDataSrc, + uint32_t * pDataDest) +{ + uint32x4_t vecOffs; + uint32_t i; + uint32_t blkCnt; + uint32_t const *pDataC; + uint32_t *pDataDestR; + uint32x4_t vecIn; + + vecOffs = vidupq_u32((uint32_t)0, 1); + vecOffs = vecOffs * srcCols; + + i = srcCols; + do + { + pDataC = (uint32_t const *) pDataSrc; + pDataDestR = pDataDest; + + blkCnt = srcRows >> 2; + while (blkCnt > 0U) + { + vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs); + vstrwq(pDataDestR, vecIn); + pDataDestR += 4; + pDataC = pDataC + srcCols * 4; + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + + /* + * tail + */ + blkCnt = srcRows & 3; + if (blkCnt > 0U) + { + mve_pred16_t p0 = vctp32q(blkCnt); + vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs); + vstrwq_p(pDataDestR, vecIn, p0); + } + + pDataSrc += 1; + pDataDest += srcRows; + } + while (--i); + + return (ARM_MATH_SUCCESS); +} + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE) +__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn) +{ + q63x2_t vecTmpLL; + q31x4_t vecTmp0, vecTmp1; + q31_t scale; + q63_t tmp64; + q31x4_t vecNrm, vecDst, vecIdx, vecSignBits; + + + vecSignBits = vclsq(vecIn); + vecSignBits = vbicq(vecSignBits, 1); + /* + * in = in << no_of_sign_bits; + */ + vecNrm = vshlq(vecIn, vecSignBits); + /* + * index = in >> 24; + */ + vecIdx = vecNrm >> 24; + vecIdx = vecIdx << 1; + + vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, vecIdx); + + vecIdx = vecIdx + 1; + + vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, vecIdx); + + vecTmp1 = vqrdmulhq(vecTmp1, vecNrm); + vecTmp0 = vecTmp0 - vecTmp1; + vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0); + vecTmp1 = vqrdmulhq(vecNrm, vecTmp1); + vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1; + vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1); + vecTmpLL = vmullbq_int(vecNrm, vecTmp0); + + /* + * scale elements 0, 2 + */ + scale = 26 + (vecSignBits[0] >> 1); + tmp64 = asrl(vecTmpLL[0], scale); + vecDst[0] = (q31_t) 
tmp64; + + scale = 26 + (vecSignBits[2] >> 1); + tmp64 = asrl(vecTmpLL[1], scale); + vecDst[2] = (q31_t) tmp64; + + vecTmpLL = vmulltq_int(vecNrm, vecTmp0); + + /* + * scale elements 1, 3 + */ + scale = 26 + (vecSignBits[1] >> 1); + tmp64 = asrl(vecTmpLL[0], scale); + vecDst[1] = (q31_t) tmp64; + + scale = 26 + (vecSignBits[3] >> 1); + tmp64 = asrl(vecTmpLL[1], scale); + vecDst[3] = (q31_t) tmp64; + /* + * set negative values to 0 + */ + vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0)); + + return vecDst; +} +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE) +__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn) +{ + q31x4_t vecTmpLev, vecTmpLodd, vecSignL; + q15x8_t vecTmp0, vecTmp1; + q15x8_t vecNrm, vecDst, vecIdx, vecSignBits; + + vecDst = vuninitializedq_s16(); + + vecSignBits = vclsq(vecIn); + vecSignBits = vbicq(vecSignBits, 1); + /* + * in = in << no_of_sign_bits; + */ + vecNrm = vshlq(vecIn, vecSignBits); + + vecIdx = vecNrm >> 8; + vecIdx = vecIdx << 1; + + vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, vecIdx); + + vecIdx = vecIdx + 1; + + vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, vecIdx); + + vecTmp1 = vqrdmulhq(vecTmp1, vecNrm); + vecTmp0 = vecTmp0 - vecTmp1; + vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0); + vecTmp1 = vqrdmulhq(vecNrm, vecTmp1); + vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1; + vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1); + + vecSignBits = vecSignBits >> 1; + + vecTmpLev = vmullbq_int(vecNrm, vecTmp0); + vecTmpLodd = vmulltq_int(vecNrm, vecTmp0); + + vecTmp0 = vecSignBits + 10; + /* + * negate sign to apply register based vshl + */ + vecTmp0 = -vecTmp0; + + /* + * shift even elements + */ + vecSignL = vmovlbq(vecTmp0); + vecTmpLev = vshlq(vecTmpLev, vecSignL); + /* + * shift odd elements + */ + vecSignL = vmovltq(vecTmp0); + vecTmpLodd = vshlq(vecTmpLodd, vecSignL); + /* + * merge and narrow odd and even parts + */ + vecDst = vmovnbq_s32(vecDst, vecTmpLev); + vecDst = vmovntq_s32(vecDst, vecTmpLodd); + /* + * set negative values to 0 + */ + vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0)); + + return vecDst; +} +#endif + +#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */ + +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_math.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_math.h new file mode 100755 index 00000000000..48bee62cd98 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_math.h @@ -0,0 +1,8970 @@ +/****************************************************************************** + * @file arm_math.h + * @brief Public header file for CMSIS DSP Library + * @version V1.7.0 + * @date 18. March 2019 + ******************************************************************************/ +/* + * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + \mainpage CMSIS DSP Software Library + * + * Introduction + * ------------ + * + * This user manual describes the CMSIS DSP software library, + * a suite of common signal processing functions for use on Cortex-M and Cortex-A processor + * based devices. + * + * The library is divided into a number of functions each covering a specific category: + * - Basic math functions + * - Fast math functions + * - Complex math functions + * - Filtering functions + * - Matrix functions + * - Transform functions + * - Motor control functions + * - Statistical functions + * - Support functions + * - Interpolation functions + * - Support Vector Machine functions (SVM) + * - Bayes classifier functions + * - Distance functions + * + * The library has generally separate functions for operating on 8-bit integers, 16-bit integers, + * 32-bit integer and 32-bit floating-point values. + * + * Using the Library + * ------------ + * + * The library installer contains prebuilt versions of the libraries in the Lib folder. + * + * Here is the list of pre-built libraries : + * - arm_cortexM7lfdp_math.lib (Cortex-M7, Little endian, Double Precision Floating Point Unit) + * - arm_cortexM7bfdp_math.lib (Cortex-M7, Big endian, Double Precision Floating Point Unit) + * - arm_cortexM7lfsp_math.lib (Cortex-M7, Little endian, Single Precision Floating Point Unit) + * - arm_cortexM7bfsp_math.lib (Cortex-M7, Big endian and Single Precision Floating Point Unit on) + * - arm_cortexM7l_math.lib (Cortex-M7, Little endian) + * - arm_cortexM7b_math.lib (Cortex-M7, Big endian) + * - arm_cortexM4lf_math.lib (Cortex-M4, Little endian, Floating Point Unit) + * - arm_cortexM4bf_math.lib (Cortex-M4, Big endian, Floating Point Unit) + * - arm_cortexM4l_math.lib (Cortex-M4, Little endian) + * - arm_cortexM4b_math.lib (Cortex-M4, Big endian) + * - arm_cortexM3l_math.lib (Cortex-M3, Little endian) + * - arm_cortexM3b_math.lib (Cortex-M3, Big endian) + * - arm_cortexM0l_math.lib (Cortex-M0 / Cortex-M0+, Little endian) + * - arm_cortexM0b_math.lib (Cortex-M0 / Cortex-M0+, Big endian) + * - arm_ARMv8MBLl_math.lib (Armv8-M Baseline, Little endian) + * - arm_ARMv8MMLl_math.lib (Armv8-M Mainline, Little endian) + * - arm_ARMv8MMLlfsp_math.lib (Armv8-M Mainline, Little endian, Single Precision Floating Point Unit) + * - arm_ARMv8MMLld_math.lib (Armv8-M Mainline, Little endian, DSP instructions) + * - arm_ARMv8MMLldfsp_math.lib (Armv8-M Mainline, Little endian, DSP instructions, Single Precision Floating Point Unit) + * + * The library functions are declared in the public file arm_math.h which is placed in the Include folder. + * Simply include this file and link the appropriate library in the application and begin calling the library functions. The Library supports single + * public header file arm_math.h for Cortex-M cores with little endian and big endian. Same header file will be used for floating point unit(FPU) variants. + * + * + * Examples + * -------- + * + * The library ships with a number of examples which demonstrate how to use the library functions. + * + * Toolchain Support + * ------------ + * + * The library is now tested on Fast Models building with cmake. + * Core M0, M7, A5 are tested. + * + * + * + * Building the Library + * ------------ + * + * The library installer contains a project file to rebuild libraries on MDK toolchain in the CMSIS\\DSP\\Projects\\ARM folder. 
+ * - arm_cortexM_math.uvprojx + * + * + * The libraries can be built by opening the arm_cortexM_math.uvprojx project in MDK-ARM, selecting a specific target, and defining the optional preprocessor macros detailed above. + * + * There is also a work-in-progress cmake build; the README file gives more details. + * + * Preprocessor Macros + * ------------ + * + * Each library project has different preprocessor macros. + * + * - ARM_MATH_BIG_ENDIAN: + * + * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default the library builds for little endian targets. + * + * - ARM_MATH_MATRIX_CHECK: + * + * Define macro ARM_MATH_MATRIX_CHECK to enable checking of the input and output sizes of matrices. + * + * - ARM_MATH_ROUNDING: + * + * Define macro ARM_MATH_ROUNDING to enable rounding in the support functions. + * + * - ARM_MATH_LOOPUNROLL: + * + * Define macro ARM_MATH_LOOPUNROLL to enable manual loop unrolling in DSP functions. + * + * - ARM_MATH_NEON: + * + * Define macro ARM_MATH_NEON to enable Neon versions of the DSP functions. + * It is not enabled by default when Neon is available because performance is + * dependent on the compiler and target architecture. + * + * - ARM_MATH_NEON_EXPERIMENTAL: + * + * Define macro ARM_MATH_NEON_EXPERIMENTAL to enable experimental Neon versions + * of some DSP functions. Experimental Neon versions currently do not have better + * performance than the scalar versions. + * + * - ARM_MATH_HELIUM: + * + * It implies the flags ARM_MATH_MVEF, ARM_MATH_MVEI and ARM_MATH_FLOAT16. + * + * - ARM_MATH_MVEF: + * + * Select Helium versions of the f32 algorithms. + * It implies ARM_MATH_FLOAT16 and ARM_MATH_MVEI. + * + * - ARM_MATH_MVEI: + * + * Select Helium versions of the int and fixed-point algorithms. + * + * - ARM_MATH_FLOAT16: + * + * Float16 implementations of some algorithms (requires the MVE extension). + * + *
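+ * Usage sketch
+ * ------------
+ *
+ * Editor's note, not part of the upstream CMSIS-DSP header: the following is a minimal
+ * illustrative sketch of calling one library function after including this header,
+ * assuming the application is linked against one of the prebuilt libraries listed above.
+ * The function name vector_add_example is illustrative only; arm_add_f32 is the CMSIS-DSP
+ * basic math routine for float32_t vectors.
+ *
+ *     #include "arm_math.h"
+ *
+ *     static float32_t srcA[4] = {1.0f, 2.0f, 3.0f, 4.0f};
+ *     static float32_t srcB[4] = {5.0f, 6.0f, 7.0f, 8.0f};
+ *     static float32_t dst[4];
+ *
+ *     void vector_add_example(void)
+ *     {
+ *       arm_add_f32(srcA, srcB, dst, 4);   // element-wise: dst[i] = srcA[i] + srcB[i]
+ *     }
+ *
+ *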
+ * CMSIS-DSP in ARM::CMSIS Pack + * ----------------------------- + * + * The following files relevant to CMSIS-DSP are present in the ARM::CMSIS Pack directories: + * |File/Folder |Content | + * |---------------------------------|------------------------------------------------------------------------| + * |\b CMSIS\\Documentation\\DSP | This documentation | + * |\b CMSIS\\DSP\\DSP_Lib_TestSuite | DSP_Lib test suite | + * |\b CMSIS\\DSP\\Examples | Example projects demonstrating the usage of the library functions | + * |\b CMSIS\\DSP\\Include | DSP_Lib include files | + * |\b CMSIS\\DSP\\Lib | DSP_Lib binaries | + * |\b CMSIS\\DSP\\Projects | Projects to rebuild DSP_Lib binaries | + * |\b CMSIS\\DSP\\Source | DSP_Lib source files | + * + *
+ * Revision History of CMSIS-DSP + * ------------ + * Please refer to \ref ChangeLog_pg. + */ + + +/** + * @defgroup groupMath Basic Math Functions + */ + +/** + * @defgroup groupFastMath Fast Math Functions + * This set of functions provides a fast approximation to sine, cosine, and square root. + * As compared to most of the other functions in the CMSIS math library, the fast math functions + * operate on individual values and not arrays. + * There are separate functions for Q15, Q31, and floating-point data. + * + */ + +/** + * @defgroup groupCmplxMath Complex Math Functions + * This set of functions operates on complex data vectors. + * The data in the complex arrays is stored in an interleaved fashion + * (real, imag, real, imag, ...). + * In the API functions, the number of samples in a complex array refers + * to the number of complex values; the array contains twice this number of + * real values. + */ + +/** + * @defgroup groupFilters Filtering Functions + */ + +/** + * @defgroup groupMatrix Matrix Functions + * + * This set of functions provides basic matrix math operations. + * The functions operate on matrix data structures. For example, + * the type + * definition for the floating-point matrix structure is shown + * below: + *
+ *     typedef struct
+ *     {
+ *       uint16_t numRows;     // number of rows of the matrix.
+ *       uint16_t numCols;     // number of columns of the matrix.
+ *       float32_t *pData;     // points to the data of the matrix.
+ *     } arm_matrix_instance_f32;
+ * 
+ * There are similar definitions for Q15 and Q31 data types. + * + * The structure specifies the size of the matrix and then points to + * an array of data. The array is of size numRows X numCols + * and the values are arranged in row order. That is, the + * matrix element (i, j) is stored at: + *
+ *     pData[i*numCols + j]
+ * 
+ * + * \par Init Functions + * There is an associated initialization function for each type of matrix + * data structure. + * The initialization function sets the values of the internal structure fields. + * Refer to \ref arm_mat_init_f32(), \ref arm_mat_init_q31() and \ref arm_mat_init_q15() + * for floating-point, Q31 and Q15 types, respectively. + * + * \par + * Use of the initialization function is optional. However, if initialization function is used + * then the instance structure cannot be placed into a const data section. + * To place the instance structure in a const data + * section, manually initialize the data structure. For example: + *
+ * arm_matrix_instance_f32 S = {nRows, nColumns, pData};
+ * arm_matrix_instance_q31 S = {nRows, nColumns, pData};
+ * arm_matrix_instance_q15 S = {nRows, nColumns, pData};
+ * 
+ * where nRows specifies the number of rows, nColumns + * specifies the number of columns, and pData points to the + * data array. + * + * \par Size Checking + * By default all of the matrix functions perform size checking on the input and + * output matrices. For example, the matrix addition function verifies that the + * two input matrices and the output matrix all have the same number of rows and + * columns. If the size check fails the functions return: + *
+ *     ARM_MATH_SIZE_MISMATCH
+ * 
+ * Otherwise the functions return + *
+ *     ARM_MATH_SUCCESS
+ * 
+ * There is some overhead associated with this matrix size checking. + * The matrix size checking is enabled via the \#define + *
+ *     ARM_MATH_MATRIX_CHECK
+ * 
+ * within the library project settings. By default this macro is defined + * and size checking is enabled. By changing the project settings and + * undefining this macro size checking is eliminated and the functions + * run a bit faster. With size checking disabled the functions always + * return ARM_MATH_SUCCESS. + */ + +/** + * @defgroup groupTransforms Transform Functions + */ + +/** + * @defgroup groupController Controller Functions + */ + +/** + * @defgroup groupStats Statistics Functions + */ + +/** + * @defgroup groupSupport Support Functions + */ + +/** + * @defgroup groupInterpolation Interpolation Functions + * These functions perform 1- and 2-dimensional interpolation of data. + * Linear interpolation is used for 1-dimensional data and + * bilinear interpolation is used for 2-dimensional data. + */ + +/** + * @defgroup groupExamples Examples + */ + +/** + * @defgroup groupSVM SVM Functions + * This set of functions is implementing SVM classification on 2 classes. + * The training must be done from scikit-learn. The parameters can be easily + * generated from the scikit-learn object. Some examples are given in + * DSP/Testing/PatternGeneration/SVM.py + * + * If more than 2 classes are needed, the functions in this folder + * will have to be used, as building blocks, to do multi-class classification. + * + * No multi-class classification is provided in this SVM folder. + * + */ + + +/** + * @defgroup groupBayes Bayesian estimators + * + * Implement the naive gaussian Bayes estimator. + * The training must be done from scikit-learn. + * + * The parameters can be easily + * generated from the scikit-learn object. Some examples are given in + * DSP/Testing/PatternGeneration/Bayes.py + */ + +/** + * @defgroup groupDistance Distance functions + * + * Distance functions for use with clustering algorithms. + * There are distance functions for float vectors and boolean vectors. 
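+ *
+ * Editor's note, not part of the upstream header: as an illustrative sketch only, and
+ * assuming the float-vector Euclidean distance function of this library's distance group
+ * is available in the linked build, a call on two length-3 vectors looks like:
+ *
+ *     float32_t a[3] = {1.0f, 0.0f, 0.0f};
+ *     float32_t b[3] = {0.0f, 1.0f, 0.0f};
+ *     float32_t d = arm_euclidean_distance_f32(a, b, 3);   // d is sqrt(2) for these vectors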
+ * + */ + + +#ifndef _ARM_MATH_H +#define _ARM_MATH_H + +#ifdef __cplusplus +extern "C" +{ +#endif + +/* Compiler specific diagnostic adjustment */ +#if defined ( __CC_ARM ) + +#elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) + +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wsign-conversion" + #pragma GCC diagnostic ignored "-Wconversion" + #pragma GCC diagnostic ignored "-Wunused-parameter" + +#elif defined ( __ICCARM__ ) + +#elif defined ( __TI_ARM__ ) + +#elif defined ( __CSMC__ ) + +#elif defined ( __TASKING__ ) + +#elif defined ( _MSC_VER ) + +#else + #error Unknown compiler +#endif + + +/* Included for intrinsics definitions */ +#if defined (_MSC_VER ) +#include <stdint.h> +#define __STATIC_FORCEINLINE static __forceinline +#define __STATIC_INLINE static __inline +#define __ALIGNED(x) __declspec(align(x)) + +#elif defined (__GNUC_PYTHON__) +#include <stdint.h> +#define __ALIGNED(x) __attribute__((aligned(x))) +#define __STATIC_FORCEINLINE static __attribute__((inline)) +#define __STATIC_INLINE static __attribute__((inline)) +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wattributes" + +#else +#include "cmsis_compiler.h" +#endif + + + +#include <string.h> +#include <math.h> +#include <float.h> +#include <limits.h> + + +#define F64_MAX ((float64_t)DBL_MAX) +#define F32_MAX ((float32_t)FLT_MAX) + +#if defined(ARM_MATH_FLOAT16) +#define F16_MAX ((float16_t)FLT_MAX) +#endif + +#define F64_MIN (-DBL_MAX) +#define F32_MIN (-FLT_MAX) + +#if defined(ARM_MATH_FLOAT16) +#define F16_MIN (-(float16_t)FLT_MAX) +#endif + +#define F64_ABSMAX ((float64_t)DBL_MAX) +#define F32_ABSMAX ((float32_t)FLT_MAX) + +#if defined(ARM_MATH_FLOAT16) +#define F16_ABSMAX ((float16_t)FLT_MAX) +#endif + +#define F64_ABSMIN ((float64_t)0.0) +#define F32_ABSMIN ((float32_t)0.0) + +#if defined(ARM_MATH_FLOAT16) +#define F16_ABSMIN ((float16_t)0.0) +#endif + +#define Q31_MAX ((q31_t)(0x7FFFFFFFL)) +#define Q15_MAX ((q15_t)(0x7FFF)) +#define Q7_MAX ((q7_t)(0x7F)) +#define Q31_MIN ((q31_t)(0x80000000L)) +#define Q15_MIN ((q15_t)(0x8000)) +#define Q7_MIN ((q7_t)(0x80)) + +#define Q31_ABSMAX ((q31_t)(0x7FFFFFFFL)) +#define Q15_ABSMAX ((q15_t)(0x7FFF)) +#define Q7_ABSMAX ((q7_t)(0x7F)) +#define Q31_ABSMIN ((q31_t)0) +#define Q15_ABSMIN ((q15_t)0) +#define Q7_ABSMIN ((q7_t)0) + +/* evaluate ARM DSP feature */ +#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) + #define ARM_MATH_DSP 1 +#endif + +#if defined(ARM_MATH_NEON) +#include <arm_neon.h> +#endif + +#if defined (ARM_MATH_HELIUM) + #define ARM_MATH_MVEF + #define ARM_MATH_FLOAT16 +#endif + +#if defined (ARM_MATH_MVEF) + #define ARM_MATH_MVEI + #define ARM_MATH_FLOAT16 +#endif + +#if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) +#include <arm_mve.h> +#endif + + + /** + * @brief Macros required for reciprocal calculation in Normalized LMS + */ + +#define DELTA_Q31 ((q31_t)(0x100)) +#define DELTA_Q15 ((q15_t)0x5) +#define INDEX_MASK 0x0000003F +#ifndef PI + #define PI 3.14159265358979f +#endif + + /** + * @brief Macros required for SINE and COSINE Fast math approximations + */ + +#define FAST_MATH_TABLE_SIZE 512 +#define FAST_MATH_Q31_SHIFT (32 - 10) +#define FAST_MATH_Q15_SHIFT (16 - 10) +#define CONTROLLER_Q31_SHIFT (32 - 9) +#define TABLE_SPACING_Q31 0x400000 +#define TABLE_SPACING_Q15 0x80 + + /** + * @brief Macros required for SINE and COSINE Controller functions + */ + /* 1.31(q31) Fixed value of 2/360 */ + /* -1 to +1 is divided into 360 values so total spacing is (2/360) */ +#define INPUT_SPACING 0xB60B61 + 
+ /** + * @brief Macros for complex numbers + */ + + /* Dimension C vector space */ + #define CMPLX_DIM 2 + + /** + * @brief Error status returned by some functions in the library. + */ + + typedef enum + { + ARM_MATH_SUCCESS = 0, /**< No error */ + ARM_MATH_ARGUMENT_ERROR = -1, /**< One or more arguments are incorrect */ + ARM_MATH_LENGTH_ERROR = -2, /**< Length of data buffer is incorrect */ + ARM_MATH_SIZE_MISMATCH = -3, /**< Size of matrices is not compatible with the operation */ + ARM_MATH_NANINF = -4, /**< Not-a-number (NaN) or infinity is generated */ + ARM_MATH_SINGULAR = -5, /**< Input matrix is singular and cannot be inverted */ + ARM_MATH_TEST_FAILURE = -6 /**< Test Failed */ + } arm_status; + + /** + * @brief 8-bit fractional data type in 1.7 format. + */ + typedef int8_t q7_t; + + /** + * @brief 16-bit fractional data type in 1.15 format. + */ + typedef int16_t q15_t; + + /** + * @brief 32-bit fractional data type in 1.31 format. + */ + typedef int32_t q31_t; + + /** + * @brief 64-bit fractional data type in 1.63 format. + */ + typedef int64_t q63_t; + + /** + * @brief 32-bit floating-point type definition. + */ + typedef float float32_t; + + /** + * @brief 64-bit floating-point type definition. + */ + typedef double float64_t; + + /** + * @brief vector types + */ +#if defined(ARM_MATH_NEON) || defined (ARM_MATH_MVEI) + /** + * @brief 64-bit fractional 128-bit vector data type in 1.63 format + */ + typedef int64x2_t q63x2_t; + + /** + * @brief 32-bit fractional 128-bit vector data type in 1.31 format. + */ + typedef int32x4_t q31x4_t; + + /** + * @brief 16-bit fractional 128-bit vector data type with 16-bit alignement in 1.15 format. + */ + typedef __ALIGNED(2) int16x8_t q15x8_t; + + /** + * @brief 8-bit fractional 128-bit vector data type with 8-bit alignement in 1.7 format. + */ + typedef __ALIGNED(1) int8x16_t q7x16_t; + + /** + * @brief 32-bit fractional 128-bit vector pair data type in 1.31 format. + */ + typedef int32x4x2_t q31x4x2_t; + + /** + * @brief 32-bit fractional 128-bit vector quadruplet data type in 1.31 format. + */ + typedef int32x4x4_t q31x4x4_t; + + /** + * @brief 16-bit fractional 128-bit vector pair data type in 1.15 format. + */ + typedef int16x8x2_t q15x8x2_t; + + /** + * @brief 16-bit fractional 128-bit vector quadruplet data type in 1.15 format. + */ + typedef int16x8x4_t q15x8x4_t; + + /** + * @brief 8-bit fractional 128-bit vector pair data type in 1.7 format. + */ + typedef int8x16x2_t q7x16x2_t; + + /** + * @brief 8-bit fractional 128-bit vector quadruplet data type in 1.7 format. + */ + typedef int8x16x4_t q7x16x4_t; + + /** + * @brief 32-bit fractional data type in 9.23 format. + */ + typedef int32_t q23_t; + + /** + * @brief 32-bit fractional 128-bit vector data type in 9.23 format. + */ + typedef int32x4_t q23x4_t; + + /** + * @brief 64-bit status 128-bit vector data type. + */ + typedef int64x2_t status64x2_t; + + /** + * @brief 32-bit status 128-bit vector data type. + */ + typedef int32x4_t status32x4_t; + + /** + * @brief 16-bit status 128-bit vector data type. + */ + typedef int16x8_t status16x8_t; + + /** + * @brief 8-bit status 128-bit vector data type. 
+ */ + typedef int8x16_t status8x16_t; + + +#endif + +#if defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF) /* floating point vector*/ + /** + * @brief 32-bit floating-point 128-bit vector type + */ + typedef float32x4_t f32x4_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit floating-point 128-bit vector data type + */ + typedef __ALIGNED(2) float16x8_t f16x8_t; +#endif + + /** + * @brief 32-bit floating-point 128-bit vector pair data type + */ + typedef float32x4x2_t f32x4x2_t; + + /** + * @brief 32-bit floating-point 128-bit vector quadruplet data type + */ + typedef float32x4x4_t f32x4x4_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit floating-point 128-bit vector pair data type + */ + typedef float16x8x2_t f16x8x2_t; + + /** + * @brief 16-bit floating-point 128-bit vector quadruplet data type + */ + typedef float16x8x4_t f16x8x4_t; +#endif + + /** + * @brief 32-bit ubiquitous 128-bit vector data type + */ + typedef union _any32x4_t + { + float32x4_t f; + int32x4_t i; + } any32x4_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit ubiquitous 128-bit vector data type + */ + typedef union _any16x8_t + { + float16x8_t f; + int16x8_t i; + } any16x8_t; +#endif + +#endif + +#if defined(ARM_MATH_NEON) + /** + * @brief 32-bit fractional 64-bit vector data type in 1.31 format. + */ + typedef int32x2_t q31x2_t; + + /** + * @brief 16-bit fractional 64-bit vector data type in 1.15 format. + */ + typedef __ALIGNED(2) int16x4_t q15x4_t; + + /** + * @brief 8-bit fractional 64-bit vector data type in 1.7 format. + */ + typedef __ALIGNED(1) int8x8_t q7x8_t; + + /** + * @brief 32-bit float 64-bit vector data type. + */ + typedef float32x2_t f32x2_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit float 64-bit vector data type. 
+ */ + typedef __ALIGNED(2) float16x4_t f16x4_t; +#endif + + /** + * @brief 32-bit floating-point 128-bit vector triplet data type + */ + typedef float32x4x3_t f32x4x3_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit floating-point 128-bit vector triplet data type + */ + typedef float16x8x3_t f16x8x3_t; +#endif + + /** + * @brief 32-bit fractional 128-bit vector triplet data type in 1.31 format + */ + typedef int32x4x3_t q31x4x3_t; + + /** + * @brief 16-bit fractional 128-bit vector triplet data type in 1.15 format + */ + typedef int16x8x3_t q15x8x3_t; + + /** + * @brief 8-bit fractional 128-bit vector triplet data type in 1.7 format + */ + typedef int8x16x3_t q7x16x3_t; + + /** + * @brief 32-bit floating-point 64-bit vector pair data type + */ + typedef float32x2x2_t f32x2x2_t; + + /** + * @brief 32-bit floating-point 64-bit vector triplet data type + */ + typedef float32x2x3_t f32x2x3_t; + + /** + * @brief 32-bit floating-point 64-bit vector quadruplet data type + */ + typedef float32x2x4_t f32x2x4_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit floating-point 64-bit vector pair data type + */ + typedef float16x4x2_t f16x4x2_t; + + /** + * @brief 16-bit floating-point 64-bit vector triplet data type + */ + typedef float16x4x3_t f16x4x3_t; + + /** + * @brief 16-bit floating-point 64-bit vector quadruplet data type + */ + typedef float16x4x4_t f16x4x4_t; +#endif + + /** + * @brief 32-bit fractional 64-bit vector pair data type in 1.31 format + */ + typedef int32x2x2_t q31x2x2_t; + + /** + * @brief 32-bit fractional 64-bit vector triplet data type in 1.31 format + */ + typedef int32x2x3_t q31x2x3_t; + + /** + * @brief 32-bit fractional 64-bit vector quadruplet data type in 1.31 format + */ + typedef int32x4x3_t q31x2x4_t; + + /** + * @brief 16-bit fractional 64-bit vector pair data type in 1.15 format + */ + typedef int16x4x2_t q15x4x2_t; + + /** + * @brief 16-bit fractional 64-bit vector triplet data type in 1.15 format + */ + typedef int16x4x2_t q15x4x3_t; + + /** + * @brief 16-bit fractional 64-bit vector quadruplet data type in 1.15 format + */ + typedef int16x4x3_t q15x4x4_t; + + /** + * @brief 8-bit fractional 64-bit vector pair data type in 1.7 format + */ + typedef int8x8x2_t q7x8x2_t; + + /** + * @brief 8-bit fractional 64-bit vector triplet data type in 1.7 format + */ + typedef int8x8x3_t q7x8x3_t; + + /** + * @brief 8-bit fractional 64-bit vector quadruplet data type in 1.7 format + */ + typedef int8x8x4_t q7x8x4_t; + + /** + * @brief 32-bit ubiquitous 64-bit vector data type + */ + typedef union _any32x2_t + { + float32x2_t f; + int32x2_t i; + } any32x2_t; + +#if defined(ARM_MATH_FLOAT16) + /** + * @brief 16-bit ubiquitous 64-bit vector data type + */ + typedef union _any16x4_t + { + float16x4_t f; + int16x4_t i; + } any16x4_t; +#endif + + /** + * @brief 32-bit status 64-bit vector data type. + */ + typedef int32x4_t status32x2_t; + + /** + * @brief 16-bit status 64-bit vector data type. + */ + typedef int16x8_t status16x4_t; + + /** + * @brief 8-bit status 64-bit vector data type. + */ + typedef int8x16_t status8x8_t; + +#endif + + + +/** + @brief definition to read/write two 16 bit values. 
+ @deprecated + */ +#if defined ( __CC_ARM ) + #define __SIMD32_TYPE int32_t __packed +#elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) + #define __SIMD32_TYPE int32_t +#elif defined ( __GNUC__ ) + #define __SIMD32_TYPE int32_t +#elif defined ( __ICCARM__ ) + #define __SIMD32_TYPE int32_t __packed +#elif defined ( __TI_ARM__ ) + #define __SIMD32_TYPE int32_t +#elif defined ( __CSMC__ ) + #define __SIMD32_TYPE int32_t +#elif defined ( __TASKING__ ) + #define __SIMD32_TYPE __un(aligned) int32_t +#elif defined(_MSC_VER ) + #define __SIMD32_TYPE int32_t +#else + #error Unknown compiler +#endif + +#define __SIMD32(addr) (*(__SIMD32_TYPE **) & (addr)) +#define __SIMD32_CONST(addr) ( (__SIMD32_TYPE * ) (addr)) +#define _SIMD32_OFFSET(addr) (*(__SIMD32_TYPE * ) (addr)) +#define __SIMD64(addr) (*( int64_t **) & (addr)) + +#define STEP(x) (x) <= 0 ? 0 : 1 +#define SQ(x) ((x) * (x)) + +/* SIMD replacement */ + + +/** + @brief Read 2 Q15 from Q15 pointer. + @param[in] pQ15 points to input value + @return Q31 value + */ +__STATIC_FORCEINLINE q31_t read_q15x2 ( + q15_t * pQ15) +{ + q31_t val; + +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (&val, pQ15, 4); +#else + val = (pQ15[1] << 16) | (pQ15[0] & 0x0FFFF) ; +#endif + + return (val); +} + +/** + @brief Read 2 Q15 from Q15 pointer and increment pointer afterwards. + @param[in] pQ15 points to input value + @return Q31 value + */ +__STATIC_FORCEINLINE q31_t read_q15x2_ia ( + q15_t ** pQ15) +{ + q31_t val; + +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (&val, *pQ15, 4); +#else + val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF); +#endif + + *pQ15 += 2; + return (val); +} + +/** + @brief Read 2 Q15 from Q15 pointer and decrement pointer afterwards. + @param[in] pQ15 points to input value + @return Q31 value + */ +__STATIC_FORCEINLINE q31_t read_q15x2_da ( + q15_t ** pQ15) +{ + q31_t val; + +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (&val, *pQ15, 4); +#else + val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF); +#endif + + *pQ15 -= 2; + return (val); +} + +/** + @brief Write 2 Q15 to Q15 pointer and increment pointer afterwards. + @param[in] pQ15 points to input value + @param[in] value Q31 value + @return none + */ +__STATIC_FORCEINLINE void write_q15x2_ia ( + q15_t ** pQ15, + q31_t value) +{ + q31_t val = value; +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (*pQ15, &val, 4); +#else + (*pQ15)[0] = (val & 0x0FFFF); + (*pQ15)[1] = (val >> 16) & 0x0FFFF; +#endif + + *pQ15 += 2; +} + +/** + @brief Write 2 Q15 to Q15 pointer. + @param[in] pQ15 points to input value + @param[in] value Q31 value + @return none + */ +__STATIC_FORCEINLINE void write_q15x2 ( + q15_t * pQ15, + q31_t value) +{ + q31_t val = value; + +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (pQ15, &val, 4); +#else + pQ15[0] = val & 0x0FFFF; + pQ15[1] = val >> 16; +#endif +} + + +/** + @brief Read 4 Q7 from Q7 pointer and increment pointer afterwards. + @param[in] pQ7 points to input value + @return Q31 value + */ +__STATIC_FORCEINLINE q31_t read_q7x4_ia ( + q7_t ** pQ7) +{ + q31_t val; + + +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (&val, *pQ7, 4); +#else + val =(((*pQ7)[3] & 0x0FF) << 24) | (((*pQ7)[2] & 0x0FF) << 16) | (((*pQ7)[1] & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF); +#endif + + *pQ7 += 4; + + return (val); +} + +/** + @brief Read 4 Q7 from Q7 pointer and decrement pointer afterwards. 
+ @param[in] pQ7 points to input value + @return Q31 value + */ +__STATIC_FORCEINLINE q31_t read_q7x4_da ( + q7_t ** pQ7) +{ + q31_t val; +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (&val, *pQ7, 4); +#else + val = ((((*pQ7)[3]) & 0x0FF) << 24) | ((((*pQ7)[2]) & 0x0FF) << 16) | ((((*pQ7)[1]) & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF); +#endif + *pQ7 -= 4; + + return (val); +} + +/** + @brief Write 4 Q7 to Q7 pointer and increment pointer afterwards. + @param[in] pQ7 points to input value + @param[in] value Q31 value + @return none + */ +__STATIC_FORCEINLINE void write_q7x4_ia ( + q7_t ** pQ7, + q31_t value) +{ + q31_t val = value; +#ifdef __ARM_FEATURE_UNALIGNED + memcpy (*pQ7, &val, 4); +#else + (*pQ7)[0] = val & 0x0FF; + (*pQ7)[1] = (val >> 8) & 0x0FF; + (*pQ7)[2] = (val >> 16) & 0x0FF; + (*pQ7)[3] = (val >> 24) & 0x0FF; + +#endif + *pQ7 += 4; +} + +/* + +Normally those kind of definitions are in a compiler file +in Core or Core_A. + +But for MSVC compiler it is a bit special. The goal is very specific +to CMSIS-DSP and only to allow the use of this library from other +systems like Python or Matlab. + +MSVC is not going to be used to cross-compile to ARM. So, having a MSVC +compiler file in Core or Core_A would not make sense. + +*/ +#if defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) + __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t data) + { + if (data == 0U) { return 32U; } + + uint32_t count = 0U; + uint32_t mask = 0x80000000U; + + while ((data & mask) == 0U) + { + count += 1U; + mask = mask >> 1U; + } + return count; + } + + __STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) + { + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; + } + + __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) + { + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; + } +#endif + +#ifndef ARM_MATH_DSP + /** + * @brief definition to pack two 16 bit values. + */ + #define __PKHBT(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0x0000FFFF) | \ + (((int32_t)(ARG2) << ARG3) & (int32_t)0xFFFF0000) ) + #define __PKHTB(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0xFFFF0000) | \ + (((int32_t)(ARG2) >> ARG3) & (int32_t)0x0000FFFF) ) +#endif + + /** + * @brief definition to pack four 8 bit values. + */ +#ifndef ARM_MATH_BIG_ENDIAN + #define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v0) << 0) & (int32_t)0x000000FF) | \ + (((int32_t)(v1) << 8) & (int32_t)0x0000FF00) | \ + (((int32_t)(v2) << 16) & (int32_t)0x00FF0000) | \ + (((int32_t)(v3) << 24) & (int32_t)0xFF000000) ) +#else + #define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v3) << 0) & (int32_t)0x000000FF) | \ + (((int32_t)(v2) << 8) & (int32_t)0x0000FF00) | \ + (((int32_t)(v1) << 16) & (int32_t)0x00FF0000) | \ + (((int32_t)(v0) << 24) & (int32_t)0xFF000000) ) +#endif + + + /** + * @brief Clips Q63 to Q31 values. + */ + __STATIC_FORCEINLINE q31_t clip_q63_to_q31( + q63_t x) + { + return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ? + ((0x7FFFFFFF ^ ((q31_t) (x >> 63)))) : (q31_t) x; + } + + /** + * @brief Clips Q63 to Q15 values. + */ + __STATIC_FORCEINLINE q15_t clip_q63_to_q15( + q63_t x) + { + return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ? + ((0x7FFF ^ ((q15_t) (x >> 63)))) : (q15_t) (x >> 15); + } + + /** + * @brief Clips Q31 to Q7 values. 
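+   * @par Example
+   *   Illustrative call (the input value is arbitrary):
+   * @code
+   *   q31_t acc = (q31_t)0x01000000;        // too large for the Q7 range
+   *   q7_t  out = clip_q31_to_q7(acc);      // saturates to 0x7F (Q7_MAX)
+   * @endcode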
+ */ + __STATIC_FORCEINLINE q7_t clip_q31_to_q7( + q31_t x) + { + return ((q31_t) (x >> 24) != ((q31_t) x >> 23)) ? + ((0x7F ^ ((q7_t) (x >> 31)))) : (q7_t) x; + } + + /** + * @brief Clips Q31 to Q15 values. + */ + __STATIC_FORCEINLINE q15_t clip_q31_to_q15( + q31_t x) + { + return ((q31_t) (x >> 16) != ((q31_t) x >> 15)) ? + ((0x7FFF ^ ((q15_t) (x >> 31)))) : (q15_t) x; + } + + /** + * @brief Multiplies 32 X 64 and returns 32 bit result in 2.30 format. + */ + __STATIC_FORCEINLINE q63_t mult32x64( + q63_t x, + q31_t y) + { + return ((((q63_t) (x & 0x00000000FFFFFFFF) * y) >> 32) + + (((q63_t) (x >> 32) * y) ) ); + } + + /** + * @brief Function to Calculates 1/in (reciprocal) value of Q31 Data type. + */ + __STATIC_FORCEINLINE uint32_t arm_recip_q31( + q31_t in, + q31_t * dst, + const q31_t * pRecipTable) + { + q31_t out; + uint32_t tempVal; + uint32_t index, i; + uint32_t signBits; + + if (in > 0) + { + signBits = ((uint32_t) (__CLZ( in) - 1)); + } + else + { + signBits = ((uint32_t) (__CLZ(-in) - 1)); + } + + /* Convert input sample to 1.31 format */ + in = (in << signBits); + + /* calculation of index for initial approximated Val */ + index = (uint32_t)(in >> 24); + index = (index & INDEX_MASK); + + /* 1.31 with exp 1 */ + out = pRecipTable[index]; + + /* calculation of reciprocal value */ + /* running approximation for two iterations */ + for (i = 0U; i < 2U; i++) + { + tempVal = (uint32_t) (((q63_t) in * out) >> 31); + tempVal = 0x7FFFFFFFu - tempVal; + /* 1.31 with exp 1 */ + /* out = (q31_t) (((q63_t) out * tempVal) >> 30); */ + out = clip_q63_to_q31(((q63_t) out * tempVal) >> 30); + } + + /* write output */ + *dst = out; + + /* return num of signbits of out = 1/in value */ + return (signBits + 1U); + } + + + /** + * @brief Function to Calculates 1/in (reciprocal) value of Q15 Data type. 
+ */ + __STATIC_FORCEINLINE uint32_t arm_recip_q15( + q15_t in, + q15_t * dst, + const q15_t * pRecipTable) + { + q15_t out = 0; + uint32_t tempVal = 0; + uint32_t index = 0, i = 0; + uint32_t signBits = 0; + + if (in > 0) + { + signBits = ((uint32_t)(__CLZ( in) - 17)); + } + else + { + signBits = ((uint32_t)(__CLZ(-in) - 17)); + } + + /* Convert input sample to 1.15 format */ + in = (in << signBits); + + /* calculation of index for initial approximated Val */ + index = (uint32_t)(in >> 8); + index = (index & INDEX_MASK); + + /* 1.15 with exp 1 */ + out = pRecipTable[index]; + + /* calculation of reciprocal value */ + /* running approximation for two iterations */ + for (i = 0U; i < 2U; i++) + { + tempVal = (uint32_t) (((q31_t) in * out) >> 15); + tempVal = 0x7FFFu - tempVal; + /* 1.15 with exp 1 */ + out = (q15_t) (((q31_t) out * tempVal) >> 14); + /* out = clip_q31_to_q15(((q31_t) out * tempVal) >> 14); */ + } + + /* write output */ + *dst = out; + + /* return num of signbits of out = 1/in value */ + return (signBits + 1); + } + +/** + * @brief Integer exponentiation + * @param[in] x value + * @param[in] nb integer exponent >= 1 + * @return x^nb + * + */ +__STATIC_INLINE float32_t arm_exponent_f32(float32_t x, int32_t nb) +{ + float32_t r = x; + nb --; + while(nb > 0) + { + r = r * x; + nb--; + } + return(r); +} + +/** + * @brief 64-bit to 32-bit unsigned normalization + * @param[in] in is input unsigned long long value + * @param[out] normalized is the 32-bit normalized value + * @param[out] norm is norm scale + */ +__STATIC_INLINE void arm_norm_64_to_32u(uint64_t in, int32_t * normalized, int32_t *norm) +{ + int32_t n1; + int32_t hi = (int32_t) (in >> 32); + int32_t lo = (int32_t) ((in << 32) >> 32); + + n1 = __CLZ(hi) - 32; + if (!n1) + { + /* + * input fits in 32-bit + */ + n1 = __CLZ(lo); + if (!n1) + { + /* + * MSB set, need to scale down by 1 + */ + *norm = -1; + *normalized = (((uint32_t) lo) >> 1); + } else + { + if (n1 == 32) + { + /* + * input is zero + */ + *norm = 0; + *normalized = 0; + } else + { + /* + * 32-bit normalization + */ + *norm = n1 - 1; + *normalized = lo << *norm; + } + } + } else + { + /* + * input fits in 64-bit + */ + n1 = 1 - n1; + *norm = -n1; + /* + * 64 bit normalization + */ + *normalized = (((uint32_t) lo) >> n1) | (hi << (32 - n1)); + } +} + +__STATIC_INLINE q31_t arm_div_q63_to_q31(q63_t num, q31_t den) +{ + q31_t result; + uint64_t absNum; + int32_t normalized; + int32_t norm; + + /* + * if sum fits in 32bits + * avoid costly 64-bit division + */ + absNum = num > 0 ? 
num : -num; + arm_norm_64_to_32u(absNum, &normalized, &norm); + if (norm > 0) + /* + * 32-bit division + */ + result = (q31_t) num / den; + else + /* + * 64-bit division + */ + result = (q31_t) (num / den); + + return result; +} + + +/* + * @brief C custom defined intrinsic functions + */ +#if !defined (ARM_MATH_DSP) + + /* + * @brief C custom defined QADD8 + */ + __STATIC_FORCEINLINE uint32_t __QADD8( + uint32_t x, + uint32_t y) + { + q31_t r, s, t, u; + + r = __SSAT(((((q31_t)x << 24) >> 24) + (((q31_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF; + s = __SSAT(((((q31_t)x << 16) >> 24) + (((q31_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF; + t = __SSAT(((((q31_t)x << 8) >> 24) + (((q31_t)y << 8) >> 24)), 8) & (int32_t)0x000000FF; + u = __SSAT(((((q31_t)x ) >> 24) + (((q31_t)y ) >> 24)), 8) & (int32_t)0x000000FF; + + return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r ))); + } + + + /* + * @brief C custom defined QSUB8 + */ + __STATIC_FORCEINLINE uint32_t __QSUB8( + uint32_t x, + uint32_t y) + { + q31_t r, s, t, u; + + r = __SSAT(((((q31_t)x << 24) >> 24) - (((q31_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF; + s = __SSAT(((((q31_t)x << 16) >> 24) - (((q31_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF; + t = __SSAT(((((q31_t)x << 8) >> 24) - (((q31_t)y << 8) >> 24)), 8) & (int32_t)0x000000FF; + u = __SSAT(((((q31_t)x ) >> 24) - (((q31_t)y ) >> 24)), 8) & (int32_t)0x000000FF; + + return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r ))); + } + + + /* + * @brief C custom defined QADD16 + */ + __STATIC_FORCEINLINE uint32_t __QADD16( + uint32_t x, + uint32_t y) + { +/* q31_t r, s; without initialisation 'arm_offset_q15 test' fails but 'intrinsic' tests pass! for armCC */ + q31_t r = 0, s = 0; + + r = __SSAT(((((q31_t)x << 16) >> 16) + (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; + s = __SSAT(((((q31_t)x ) >> 16) + (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined SHADD16 + */ + __STATIC_FORCEINLINE uint32_t __SHADD16( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = (((((q31_t)x << 16) >> 16) + (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; + s = (((((q31_t)x ) >> 16) + (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined QSUB16 + */ + __STATIC_FORCEINLINE uint32_t __QSUB16( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = __SSAT(((((q31_t)x << 16) >> 16) - (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; + s = __SSAT(((((q31_t)x ) >> 16) - (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined SHSUB16 + */ + __STATIC_FORCEINLINE uint32_t __SHSUB16( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = (((((q31_t)x << 16) >> 16) - (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; + s = (((((q31_t)x ) >> 16) - (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined QASX + */ + __STATIC_FORCEINLINE uint32_t __QASX( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = __SSAT(((((q31_t)x << 16) >> 16) - (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; + s = __SSAT(((((q31_t)x ) >> 16) + (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined SHASX + */ + __STATIC_FORCEINLINE uint32_t __SHASX( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = (((((q31_t)x 
<< 16) >> 16) - (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; + s = (((((q31_t)x ) >> 16) + (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined QSAX + */ + __STATIC_FORCEINLINE uint32_t __QSAX( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = __SSAT(((((q31_t)x << 16) >> 16) + (((q31_t)y ) >> 16)), 16) & (int32_t)0x0000FFFF; + s = __SSAT(((((q31_t)x ) >> 16) - (((q31_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined SHSAX + */ + __STATIC_FORCEINLINE uint32_t __SHSAX( + uint32_t x, + uint32_t y) + { + q31_t r, s; + + r = (((((q31_t)x << 16) >> 16) + (((q31_t)y ) >> 16)) >> 1) & (int32_t)0x0000FFFF; + s = (((((q31_t)x ) >> 16) - (((q31_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF; + + return ((uint32_t)((s << 16) | (r ))); + } + + + /* + * @brief C custom defined SMUSDX + */ + __STATIC_FORCEINLINE uint32_t __SMUSDX( + uint32_t x, + uint32_t y) + { + return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) - + ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) )); + } + + /* + * @brief C custom defined SMUADX + */ + __STATIC_FORCEINLINE uint32_t __SMUADX( + uint32_t x, + uint32_t y) + { + return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) + + ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) )); + } + + + /* + * @brief C custom defined QADD + */ + __STATIC_FORCEINLINE int32_t __QADD( + int32_t x, + int32_t y) + { + return ((int32_t)(clip_q63_to_q31((q63_t)x + (q31_t)y))); + } + + + /* + * @brief C custom defined QSUB + */ + __STATIC_FORCEINLINE int32_t __QSUB( + int32_t x, + int32_t y) + { + return ((int32_t)(clip_q63_to_q31((q63_t)x - (q31_t)y))); + } + + + /* + * @brief C custom defined SMLAD + */ + __STATIC_FORCEINLINE uint32_t __SMLAD( + uint32_t x, + uint32_t y, + uint32_t sum) + { + return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) + + ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) + + ( ((q31_t)sum ) ) )); + } + + + /* + * @brief C custom defined SMLADX + */ + __STATIC_FORCEINLINE uint32_t __SMLADX( + uint32_t x, + uint32_t y, + uint32_t sum) + { + return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) + + ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) + + ( ((q31_t)sum ) ) )); + } + + + /* + * @brief C custom defined SMLSDX + */ + __STATIC_FORCEINLINE uint32_t __SMLSDX( + uint32_t x, + uint32_t y, + uint32_t sum) + { + return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) - + ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) + + ( ((q31_t)sum ) ) )); + } + + + /* + * @brief C custom defined SMLALD + */ + __STATIC_FORCEINLINE uint64_t __SMLALD( + uint32_t x, + uint32_t y, + uint64_t sum) + { +/* return (sum + ((q15_t) (x >> 16) * (q15_t) (y >> 16)) + ((q15_t) x * (q15_t) y)); */ + return ((uint64_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) + + ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) + + ( ((q63_t)sum ) ) )); + } + + + /* + * @brief C custom defined SMLALDX + */ + __STATIC_FORCEINLINE uint64_t __SMLALDX( + uint32_t x, + uint32_t y, + uint64_t sum) + { +/* return (sum + ((q15_t) (x >> 16) * (q15_t) y)) + ((q15_t) x * (q15_t) (y >> 16)); */ + return ((uint64_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y ) >> 16)) + + ((((q31_t)x ) >> 16) * (((q31_t)y << 16) >> 16)) + + ( ((q63_t)sum ) ) )); + } + + + /* + * @brief C custom defined SMUAD + */ + __STATIC_FORCEINLINE uint32_t __SMUAD( + uint32_t x, + uint32_t y) + { + return 
((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) + + ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) )); + } + + + /* + * @brief C custom defined SMUSD + */ + __STATIC_FORCEINLINE uint32_t __SMUSD( + uint32_t x, + uint32_t y) + { + return ((uint32_t)(((((q31_t)x << 16) >> 16) * (((q31_t)y << 16) >> 16)) - + ((((q31_t)x ) >> 16) * (((q31_t)y ) >> 16)) )); + } + + + /* + * @brief C custom defined SXTB16 + */ + __STATIC_FORCEINLINE uint32_t __SXTB16( + uint32_t x) + { + return ((uint32_t)(((((q31_t)x << 24) >> 24) & (q31_t)0x0000FFFF) | + ((((q31_t)x << 8) >> 8) & (q31_t)0xFFFF0000) )); + } + + /* + * @brief C custom defined SMMLA + */ + __STATIC_FORCEINLINE int32_t __SMMLA( + int32_t x, + int32_t y, + int32_t sum) + { + return (sum + (int32_t) (((int64_t) x * y) >> 32)); + } + +#endif /* !defined (ARM_MATH_DSP) */ + + + /** + * @brief Instance structure for the Q7 FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of filter coefficients in the filter. */ + q7_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + const q7_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + } arm_fir_instance_q7; + + /** + * @brief Instance structure for the Q15 FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of filter coefficients in the filter. */ + q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + } arm_fir_instance_q15; + + /** + * @brief Instance structure for the Q31 FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of filter coefficients in the filter. */ + q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + } arm_fir_instance_q31; + + /** + * @brief Instance structure for the floating-point FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of filter coefficients in the filter. */ + float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + } arm_fir_instance_f32; + + /** + * @brief Processing function for the Q7 FIR filter. + * @param[in] S points to an instance of the Q7 FIR filter structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_q7( + const arm_fir_instance_q7 * S, + const q7_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the Q7 FIR filter. + * @param[in,out] S points to an instance of the Q7 FIR structure. + * @param[in] numTaps Number of filter coefficients in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of samples that are processed. + */ + void arm_fir_init_q7( + arm_fir_instance_q7 * S, + uint16_t numTaps, + const q7_t * pCoeffs, + q7_t * pState, + uint32_t blockSize); + + /** + * @brief Processing function for the Q15 FIR filter. + * @param[in] S points to an instance of the Q15 FIR structure. + * @param[in] pSrc points to the block of input data. 
+ * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_q15( + const arm_fir_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + /** + * @brief Processing function for the fast Q15 FIR filter (fast version). + * @param[in] S points to an instance of the Q15 FIR filter structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_fast_q15( + const arm_fir_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the Q15 FIR filter. + * @param[in,out] S points to an instance of the Q15 FIR filter structure. + * @param[in] numTaps Number of filter coefficients in the filter. Must be even and greater than or equal to 4. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of samples that are processed at a time. + * @return The function returns either + * ARM_MATH_SUCCESS if initialization was successful or + * ARM_MATH_ARGUMENT_ERROR if numTaps is not a supported value. + */ + arm_status arm_fir_init_q15( + arm_fir_instance_q15 * S, + uint16_t numTaps, + const q15_t * pCoeffs, + q15_t * pState, + uint32_t blockSize); + + /** + * @brief Processing function for the Q31 FIR filter. + * @param[in] S points to an instance of the Q31 FIR filter structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_q31( + const arm_fir_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + /** + * @brief Processing function for the fast Q31 FIR filter (fast version). + * @param[in] S points to an instance of the Q31 FIR filter structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_fast_q31( + const arm_fir_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the Q31 FIR filter. + * @param[in,out] S points to an instance of the Q31 FIR structure. + * @param[in] numTaps Number of filter coefficients in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of samples that are processed at a time. + */ + void arm_fir_init_q31( + arm_fir_instance_q31 * S, + uint16_t numTaps, + const q31_t * pCoeffs, + q31_t * pState, + uint32_t blockSize); + + /** + * @brief Processing function for the floating-point FIR filter. + * @param[in] S points to an instance of the floating-point FIR structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_f32( + const arm_fir_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the floating-point FIR filter. + * @param[in,out] S points to an instance of the floating-point FIR filter structure. + * @param[in] numTaps Number of filter coefficients in the filter. 
+ * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of samples that are processed at a time. + */ + void arm_fir_init_f32( + arm_fir_instance_f32 * S, + uint16_t numTaps, + const float32_t * pCoeffs, + float32_t * pState, + uint32_t blockSize); + + /** + * @brief Instance structure for the Q15 Biquad cascade filter. + */ + typedef struct + { + int8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + q15_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */ + const q15_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */ + int8_t postShift; /**< Additional shift, in bits, applied to each output sample. */ + } arm_biquad_casd_df1_inst_q15; + + /** + * @brief Instance structure for the Q31 Biquad cascade filter. + */ + typedef struct + { + uint32_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + q31_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */ + const q31_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */ + uint8_t postShift; /**< Additional shift, in bits, applied to each output sample. */ + } arm_biquad_casd_df1_inst_q31; + + /** + * @brief Instance structure for the floating-point Biquad cascade filter. + */ + typedef struct + { + uint32_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + float32_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */ + const float32_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */ + } arm_biquad_casd_df1_inst_f32; + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + /** + * @brief Instance structure for the modified Biquad coefs required by vectorized code. + */ + typedef struct + { + float32_t coeffs[8][4]; /**< Points to the array of modified coefficients. The array is of length 32. There is one per stage */ + } arm_biquad_mod_coef_f32; +#endif + + /** + * @brief Processing function for the Q15 Biquad cascade filter. + * @param[in] S points to an instance of the Q15 Biquad cascade structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_df1_q15( + const arm_biquad_casd_df1_inst_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the Q15 Biquad cascade filter. + * @param[in,out] S points to an instance of the Q15 Biquad cascade structure. + * @param[in] numStages number of 2nd order stages in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] postShift Shift to be applied to the output. Varies according to the coefficients format + */ + void arm_biquad_cascade_df1_init_q15( + arm_biquad_casd_df1_inst_q15 * S, + uint8_t numStages, + const q15_t * pCoeffs, + q15_t * pState, + int8_t postShift); + + /** + * @brief Fast but less precise processing function for the Q15 Biquad cascade filter for Cortex-M3 and Cortex-M4. + * @param[in] S points to an instance of the Q15 Biquad cascade structure. + * @param[in] pSrc points to the block of input data. 
+ * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_df1_fast_q15( + const arm_biquad_casd_df1_inst_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + /** + * @brief Processing function for the Q31 Biquad cascade filter + * @param[in] S points to an instance of the Q31 Biquad cascade structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_df1_q31( + const arm_biquad_casd_df1_inst_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + /** + * @brief Fast but less precise processing function for the Q31 Biquad cascade filter for Cortex-M3 and Cortex-M4. + * @param[in] S points to an instance of the Q31 Biquad cascade structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_df1_fast_q31( + const arm_biquad_casd_df1_inst_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the Q31 Biquad cascade filter. + * @param[in,out] S points to an instance of the Q31 Biquad cascade structure. + * @param[in] numStages number of 2nd order stages in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] postShift Shift to be applied to the output. Varies according to the coefficients format + */ + void arm_biquad_cascade_df1_init_q31( + arm_biquad_casd_df1_inst_q31 * S, + uint8_t numStages, + const q31_t * pCoeffs, + q31_t * pState, + int8_t postShift); + + /** + * @brief Processing function for the floating-point Biquad cascade filter. + * @param[in] S points to an instance of the floating-point Biquad cascade structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_df1_f32( + const arm_biquad_casd_df1_inst_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the floating-point Biquad cascade filter. + * @param[in,out] S points to an instance of the floating-point Biquad cascade structure. + * @param[in] numStages number of 2nd order stages in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pCoeffsMod points to the modified filter coefficients (only MVE version). + * @param[in] pState points to the state buffer. + */ +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + void arm_biquad_cascade_df1_mve_init_f32( + arm_biquad_casd_df1_inst_f32 * S, + uint8_t numStages, + const float32_t * pCoeffs, + arm_biquad_mod_coef_f32 * pCoeffsMod, + float32_t * pState); +#endif + + void arm_biquad_cascade_df1_init_f32( + arm_biquad_casd_df1_inst_f32 * S, + uint8_t numStages, + const float32_t * pCoeffs, + float32_t * pState); + + + /** + * @brief Compute the logical bitwise AND of two fixed-point vectors. 
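+   * @par Example
+   *   Minimal sketch (buffer length chosen arbitrarily):
+   * @code
+   *   uint16_t a[4] = { 0x00FFu, 0x0F0Fu, 0xAAAAu, 0xFFFFu };
+   *   uint16_t b[4] = { 0x0FF0u, 0x00FFu, 0x5555u, 0x1234u };
+   *   uint16_t res[4];
+   *   arm_and_u16(a, b, res, 4);            // res[i] = a[i] & b[i]
+   * @endcode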
+ * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_and_u16( + const uint16_t * pSrcA, + const uint16_t * pSrcB, + uint16_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise AND of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_and_u32( + const uint32_t * pSrcA, + const uint32_t * pSrcB, + uint32_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise AND of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_and_u8( + const uint8_t * pSrcA, + const uint8_t * pSrcB, + uint8_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise OR of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_or_u16( + const uint16_t * pSrcA, + const uint16_t * pSrcB, + uint16_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise OR of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_or_u32( + const uint32_t * pSrcA, + const uint32_t * pSrcB, + uint32_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise OR of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_or_u8( + const uint8_t * pSrcA, + const uint8_t * pSrcB, + uint8_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise NOT of a fixed-point vector. + * @param[in] pSrc points to input vector + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_not_u16( + const uint16_t * pSrc, + uint16_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise NOT of a fixed-point vector. + * @param[in] pSrc points to input vector + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_not_u32( + const uint32_t * pSrc, + uint32_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise NOT of a fixed-point vector. + * @param[in] pSrc points to input vector + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_not_u8( + const uint8_t * pSrc, + uint8_t * pDst, + uint32_t blockSize); + +/** + * @brief Compute the logical bitwise XOR of two fixed-point vectors. 
+ * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_xor_u16( + const uint16_t * pSrcA, + const uint16_t * pSrcB, + uint16_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise XOR of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_xor_u32( + const uint32_t * pSrcA, + const uint32_t * pSrcB, + uint32_t * pDst, + uint32_t blockSize); + + /** + * @brief Compute the logical bitwise XOR of two fixed-point vectors. + * @param[in] pSrcA points to input vector A + * @param[in] pSrcB points to input vector B + * @param[out] pDst points to output vector + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_xor_u8( + const uint8_t * pSrcA, + const uint8_t * pSrcB, + uint8_t * pDst, + uint32_t blockSize); + + /** + * @brief Struct for specifying sorting algorithm + */ + typedef enum + { + ARM_SORT_BITONIC = 0, + /**< Bitonic sort */ + ARM_SORT_BUBBLE = 1, + /**< Bubble sort */ + ARM_SORT_HEAP = 2, + /**< Heap sort */ + ARM_SORT_INSERTION = 3, + /**< Insertion sort */ + ARM_SORT_QUICK = 4, + /**< Quick sort */ + ARM_SORT_SELECTION = 5 + /**< Selection sort */ + } arm_sort_alg; + + /** + * @brief Struct for specifying sorting algorithm + */ + typedef enum + { + ARM_SORT_DESCENDING = 0, + /**< Descending order (9 to 0) */ + ARM_SORT_ASCENDING = 1 + /**< Ascending order (0 to 9) */ + } arm_sort_dir; + + /** + * @brief Instance structure for the sorting algorithms. + */ + typedef struct + { + arm_sort_alg alg; /**< Sorting algorithm selected */ + arm_sort_dir dir; /**< Sorting order (direction) */ + } arm_sort_instance_f32; + + /** + * @param[in] S points to an instance of the sorting structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_sort_f32( + const arm_sort_instance_f32 * S, + float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + /** + * @param[in,out] S points to an instance of the sorting structure. + * @param[in] alg Selected algorithm. + * @param[in] dir Sorting order. + */ + void arm_sort_init_f32( + arm_sort_instance_f32 * S, + arm_sort_alg alg, + arm_sort_dir dir); + + /** + * @brief Instance structure for the sorting algorithms. + */ + typedef struct + { + arm_sort_dir dir; /**< Sorting order (direction) */ + float32_t * buffer; /**< Working buffer */ + } arm_merge_sort_instance_f32; + + /** + * @param[in] S points to an instance of the sorting structure. + * @param[in,out] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. + */ + void arm_merge_sort_f32( + const arm_merge_sort_instance_f32 * S, + float32_t *pSrc, + float32_t *pDst, + uint32_t blockSize); + + /** + * @param[in,out] S points to an instance of the sorting structure. + * @param[in] dir Sorting order. + * @param[in] buffer Working buffer. 
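+   * @par Example
+   *   Minimal sketch for an 8-sample block, assuming the working buffer must be
+   *   able to hold one full block of samples:
+   * @code
+   *   float32_t in[8]  = { 3.0f, 1.0f, 4.0f, 1.0f, 5.0f, 9.0f, 2.0f, 6.0f };
+   *   float32_t out[8];
+   *   float32_t work[8];
+   *   arm_merge_sort_instance_f32 S;
+   *   arm_merge_sort_init_f32(&S, ARM_SORT_ASCENDING, work);
+   *   arm_merge_sort_f32(&S, in, out, 8);   // out holds the sorted samples
+   * @endcode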
+ */ + void arm_merge_sort_init_f32( + arm_merge_sort_instance_f32 * S, + arm_sort_dir dir, + float32_t * buffer); + + /** + * @brief Struct for specifying cubic spline type + */ + typedef enum + { + ARM_SPLINE_NATURAL = 0, /**< Natural spline */ + ARM_SPLINE_PARABOLIC_RUNOUT = 1 /**< Parabolic runout spline */ + } arm_spline_type; + + /** + * @brief Instance structure for the floating-point cubic spline interpolation. + */ + typedef struct + { + arm_spline_type type; /**< Type (boundary conditions) */ + const float32_t * x; /**< x values */ + const float32_t * y; /**< y values */ + uint32_t n_x; /**< Number of known data points */ + float32_t * coeffs; /**< Coefficients buffer (b,c, and d) */ + } arm_spline_instance_f32; + + /** + * @brief Processing function for the floating-point cubic spline interpolation. + * @param[in] S points to an instance of the floating-point spline structure. + * @param[in] xq points to the x values ot the interpolated data points. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples of output data. + */ + void arm_spline_f32( + arm_spline_instance_f32 * S, + const float32_t * xq, + float32_t * pDst, + uint32_t blockSize); + + /** + * @brief Initialization function for the floating-point cubic spline interpolation. + * @param[in,out] S points to an instance of the floating-point spline structure. + * @param[in] type type of cubic spline interpolation (boundary conditions) + * @param[in] x points to the x values of the known data points. + * @param[in] y points to the y values of the known data points. + * @param[in] n number of known data points. + * @param[in] coeffs coefficients array for b, c, and d + * @param[in] tempBuffer buffer array for internal computations + */ + void arm_spline_init_f32( + arm_spline_instance_f32 * S, + arm_spline_type type, + const float32_t * x, + const float32_t * y, + uint32_t n, + float32_t * coeffs, + float32_t * tempBuffer); + + /** + * @brief Instance structure for the floating-point matrix structure. + */ + typedef struct + { + uint16_t numRows; /**< number of rows of the matrix. */ + uint16_t numCols; /**< number of columns of the matrix. */ + float32_t *pData; /**< points to the data of the matrix. */ + } arm_matrix_instance_f32; + + /** + * @brief Instance structure for the floating-point matrix structure. + */ + typedef struct + { + uint16_t numRows; /**< number of rows of the matrix. */ + uint16_t numCols; /**< number of columns of the matrix. */ + float64_t *pData; /**< points to the data of the matrix. */ + } arm_matrix_instance_f64; + + /** + * @brief Instance structure for the Q15 matrix structure. + */ + typedef struct + { + uint16_t numRows; /**< number of rows of the matrix. */ + uint16_t numCols; /**< number of columns of the matrix. */ + q15_t *pData; /**< points to the data of the matrix. */ + } arm_matrix_instance_q15; + + /** + * @brief Instance structure for the Q31 matrix structure. + */ + typedef struct + { + uint16_t numRows; /**< number of rows of the matrix. */ + uint16_t numCols; /**< number of columns of the matrix. */ + q31_t *pData; /**< points to the data of the matrix. */ + } arm_matrix_instance_q31; + + /** + * @brief Floating-point matrix addition. 
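+   * @par Example
+   *   Sketch adding two 2x2 matrices (values are arbitrary; instances may also
+   *   be filled with arm_mat_init_f32, declared further below in this file):
+   * @code
+   *   float32_t aData[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
+   *   float32_t bData[4] = { 5.0f, 6.0f, 7.0f, 8.0f };
+   *   float32_t cData[4];
+   *   arm_matrix_instance_f32 A = { 2, 2, aData };
+   *   arm_matrix_instance_f32 B = { 2, 2, bData };
+   *   arm_matrix_instance_f32 C = { 2, 2, cData };
+   *   if (arm_mat_add_f32(&A, &B, &C) != ARM_MATH_SUCCESS)
+   *   {
+   *     // size mismatch between the operands
+   *   }
+   * @endcode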
+ * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_add_f32( + const arm_matrix_instance_f32 * pSrcA, + const arm_matrix_instance_f32 * pSrcB, + arm_matrix_instance_f32 * pDst); + + /** + * @brief Q15 matrix addition. + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_add_q15( + const arm_matrix_instance_q15 * pSrcA, + const arm_matrix_instance_q15 * pSrcB, + arm_matrix_instance_q15 * pDst); + + /** + * @brief Q31 matrix addition. + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_add_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Floating-point, complex, matrix multiplication. + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_cmplx_mult_f32( + const arm_matrix_instance_f32 * pSrcA, + const arm_matrix_instance_f32 * pSrcB, + arm_matrix_instance_f32 * pDst); + + /** + * @brief Q15, complex, matrix multiplication. + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_cmplx_mult_q15( + const arm_matrix_instance_q15 * pSrcA, + const arm_matrix_instance_q15 * pSrcB, + arm_matrix_instance_q15 * pDst, + q15_t * pScratch); + + /** + * @brief Q31, complex, matrix multiplication. + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_cmplx_mult_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Floating-point matrix transpose. + * @param[in] pSrc points to the input matrix + * @param[out] pDst points to the output matrix + * @return The function returns either ARM_MATH_SIZE_MISMATCH + * or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_trans_f32( + const arm_matrix_instance_f32 * pSrc, + arm_matrix_instance_f32 * pDst); + + /** + * @brief Q15 matrix transpose. 
+ * @param[in] pSrc points to the input matrix + * @param[out] pDst points to the output matrix + * @return The function returns either ARM_MATH_SIZE_MISMATCH + * or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_trans_q15( + const arm_matrix_instance_q15 * pSrc, + arm_matrix_instance_q15 * pDst); + + /** + * @brief Q31 matrix transpose. + * @param[in] pSrc points to the input matrix + * @param[out] pDst points to the output matrix + * @return The function returns either ARM_MATH_SIZE_MISMATCH + * or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_trans_q31( + const arm_matrix_instance_q31 * pSrc, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Floating-point matrix multiplication + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_mult_f32( + const arm_matrix_instance_f32 * pSrcA, + const arm_matrix_instance_f32 * pSrcB, + arm_matrix_instance_f32 * pDst); + + /** + * @brief Q15 matrix multiplication + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @param[in] pState points to the array for storing intermediate results + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_mult_q15( + const arm_matrix_instance_q15 * pSrcA, + const arm_matrix_instance_q15 * pSrcB, + arm_matrix_instance_q15 * pDst, + q15_t * pState); + + /** + * @brief Q15 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @param[in] pState points to the array for storing intermediate results + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_mult_fast_q15( + const arm_matrix_instance_q15 * pSrcA, + const arm_matrix_instance_q15 * pSrcB, + arm_matrix_instance_q15 * pDst, + q15_t * pState); + + /** + * @brief Q31 matrix multiplication + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_mult_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Q31 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
+ */ +arm_status arm_mat_mult_fast_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Floating-point matrix subtraction + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_sub_f32( + const arm_matrix_instance_f32 * pSrcA, + const arm_matrix_instance_f32 * pSrcB, + arm_matrix_instance_f32 * pDst); + + /** + * @brief Q15 matrix subtraction + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_sub_q15( + const arm_matrix_instance_q15 * pSrcA, + const arm_matrix_instance_q15 * pSrcB, + arm_matrix_instance_q15 * pDst); + + /** + * @brief Q31 matrix subtraction + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_sub_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Floating-point matrix scaling. + * @param[in] pSrc points to the input matrix + * @param[in] scale scale factor + * @param[out] pDst points to the output matrix + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_scale_f32( + const arm_matrix_instance_f32 * pSrc, + float32_t scale, + arm_matrix_instance_f32 * pDst); + + /** + * @brief Q15 matrix scaling. + * @param[in] pSrc points to input matrix + * @param[in] scaleFract fractional portion of the scale factor + * @param[in] shift number of bits to shift the result by + * @param[out] pDst points to output matrix + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_scale_q15( + const arm_matrix_instance_q15 * pSrc, + q15_t scaleFract, + int32_t shift, + arm_matrix_instance_q15 * pDst); + + /** + * @brief Q31 matrix scaling. + * @param[in] pSrc points to input matrix + * @param[in] scaleFract fractional portion of the scale factor + * @param[in] shift number of bits to shift the result by + * @param[out] pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_scale_q31( + const arm_matrix_instance_q31 * pSrc, + q31_t scaleFract, + int32_t shift, + arm_matrix_instance_q31 * pDst); + + /** + * @brief Q31 matrix initialization. + * @param[in,out] S points to an instance of the floating-point matrix structure. + * @param[in] nRows number of rows in the matrix. + * @param[in] nColumns number of columns in the matrix. + * @param[in] pData points to the matrix data array. 
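+   * @par Example
+   *   Sketch wrapping a caller-owned buffer as a 2x3 Q31 matrix:
+   * @code
+   *   q31_t data[6] = { 0 };
+   *   arm_matrix_instance_q31 M;
+   *   arm_mat_init_q31(&M, 2, 3, data);     // M.numRows = 2, M.numCols = 3
+   * @endcode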
+ */
+void arm_mat_init_q31(
+ arm_matrix_instance_q31 * S,
+ uint16_t nRows,
+ uint16_t nColumns,
+ q31_t * pData);
+
+ /**
+ * @brief Q15 matrix initialization.
+ * @param[in,out] S points to an instance of the Q15 matrix structure.
+ * @param[in] nRows number of rows in the matrix.
+ * @param[in] nColumns number of columns in the matrix.
+ * @param[in] pData points to the matrix data array.
+ */
+void arm_mat_init_q15(
+ arm_matrix_instance_q15 * S,
+ uint16_t nRows,
+ uint16_t nColumns,
+ q15_t * pData);
+
+ /**
+ * @brief Floating-point matrix initialization.
+ * @param[in,out] S points to an instance of the floating-point matrix structure.
+ * @param[in] nRows number of rows in the matrix.
+ * @param[in] nColumns number of columns in the matrix.
+ * @param[in] pData points to the matrix data array.
+ */
+void arm_mat_init_f32(
+ arm_matrix_instance_f32 * S,
+ uint16_t nRows,
+ uint16_t nColumns,
+ float32_t * pData);
+
+
+ /**
+ * @brief Instance structure for the Q15 PID Control.
+ */
+ typedef struct
+ {
+ q15_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */
+#if !defined (ARM_MATH_DSP)
+ q15_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */
+ q15_t A2; /**< The derived gain, A2 = Kd. */
+#else
+ q31_t A1; /**< The derived gains A1 = -Kp - 2Kd and A2 = Kd, packed into one word. */
+#endif
+ q15_t state[3]; /**< The state array of length 3. */
+ q15_t Kp; /**< The proportional gain. */
+ q15_t Ki; /**< The integral gain. */
+ q15_t Kd; /**< The derivative gain. */
+ } arm_pid_instance_q15;
+
+ /**
+ * @brief Instance structure for the Q31 PID Control.
+ */
+ typedef struct
+ {
+ q31_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */
+ q31_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */
+ q31_t A2; /**< The derived gain, A2 = Kd . */
+ q31_t state[3]; /**< The state array of length 3. */
+ q31_t Kp; /**< The proportional gain. */
+ q31_t Ki; /**< The integral gain. */
+ q31_t Kd; /**< The derivative gain. */
+ } arm_pid_instance_q31;
+
+ /**
+ * @brief Instance structure for the floating-point PID Control.
+ */
+ typedef struct
+ {
+ float32_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */
+ float32_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */
+ float32_t A2; /**< The derived gain, A2 = Kd . */
+ float32_t state[3]; /**< The state array of length 3. */
+ float32_t Kp; /**< The proportional gain. */
+ float32_t Ki; /**< The integral gain. */
+ float32_t Kd; /**< The derivative gain. */
+ } arm_pid_instance_f32;
+
+
+ /**
+ * @brief Initialization function for the floating-point PID Control.
+ * @param[in,out] S points to an instance of the PID structure.
+ * @param[in] resetStateFlag flag to reset the state. 0 = no change in state; 1 = reset the state.
+ */
+ void arm_pid_init_f32(
+ arm_pid_instance_f32 * S,
+ int32_t resetStateFlag);
+
+
+ /**
+ * @brief Reset function for the floating-point PID Control.
+ * @param[in,out] S is an instance of the floating-point PID Control structure
+ */
+ void arm_pid_reset_f32(
+ arm_pid_instance_f32 * S);
+
+
+ /**
+ * @brief Initialization function for the Q31 PID Control.
+ * @param[in,out] S points to an instance of the Q31 PID structure.
+ * @param[in] resetStateFlag flag to reset the state. 0 = no change in state; 1 = reset the state.
+ */
+ void arm_pid_init_q31(
+ arm_pid_instance_q31 * S,
+ int32_t resetStateFlag);
+
+
+ /**
+ * @brief Reset function for the Q31 PID Control.
+ * @param[in,out] S points to an instance of the Q31 PID Control structure
+ */
+
+ void arm_pid_reset_q31(
+ arm_pid_instance_q31 * S);
+
+
+ /**
+ * @brief Initialization function for the Q15 PID Control.
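+ *
+ * The same init/reset/process pattern applies to all three PID variants; a minimal
+ * floating-point sketch (the gain values and the per-sample error below are
+ * illustrative assumptions, and the per-sample helper arm_pid_f32() is the inline
+ * controller function provided later in this header):
+ * @code
+ * arm_pid_instance_f32 pid;
+ * pid.Kp = 0.5f;                             // proportional gain
+ * pid.Ki = 0.05f;                            // integral gain
+ * pid.Kd = 0.01f;                            // derivative gain
+ * arm_pid_init_f32(&pid, 1);                 // derive A0/A1/A2 from the gains and reset the state
+ * float32_t err = 0.1f;                      // per-sample error, i.e. setpoint minus measurement
+ * float32_t command = arm_pid_f32(&pid, err);
+ * @endcode
+ *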
+ * @param[in,out] S points to an instance of the Q15 PID structure.
+ * @param[in] resetStateFlag flag to reset the state. 0 = no change in state; 1 = reset the state.
+ */
+ void arm_pid_init_q15(
+ arm_pid_instance_q15 * S,
+ int32_t resetStateFlag);
+
+
+ /**
+ * @brief Reset function for the Q15 PID Control.
+ * @param[in,out] S points to an instance of the Q15 PID Control structure
+ */
+ void arm_pid_reset_q15(
+ arm_pid_instance_q15 * S);
+
+
+ /**
+ * @brief Instance structure for the floating-point Linear Interpolate function.
+ */
+ typedef struct
+ {
+ uint32_t nValues; /**< number of entries in the y data table. */
+ float32_t x1; /**< x value of the first table entry. */
+ float32_t xSpacing; /**< spacing between consecutive x values. */
+ float32_t *pYData; /**< pointer to the table of Y values */
+ } arm_linear_interp_instance_f32;
+
+ /**
+ * @brief Instance structure for the floating-point bilinear interpolation function.
+ */
+ typedef struct
+ {
+ uint16_t numRows; /**< number of rows in the data table. */
+ uint16_t numCols; /**< number of columns in the data table. */
+ float32_t *pData; /**< points to the data table. */
+ } arm_bilinear_interp_instance_f32;
+
+ /**
+ * @brief Instance structure for the Q31 bilinear interpolation function.
+ */
+ typedef struct
+ {
+ uint16_t numRows; /**< number of rows in the data table. */
+ uint16_t numCols; /**< number of columns in the data table. */
+ q31_t *pData; /**< points to the data table. */
+ } arm_bilinear_interp_instance_q31;
+
+ /**
+ * @brief Instance structure for the Q15 bilinear interpolation function.
+ */
+ typedef struct
+ {
+ uint16_t numRows; /**< number of rows in the data table. */
+ uint16_t numCols; /**< number of columns in the data table. */
+ q15_t *pData; /**< points to the data table. */
+ } arm_bilinear_interp_instance_q15;
+
+ /**
+ * @brief Instance structure for the Q7 bilinear interpolation function.
+ */
+ typedef struct
+ {
+ uint16_t numRows; /**< number of rows in the data table. */
+ uint16_t numCols; /**< number of columns in the data table. */
+ q7_t *pData; /**< points to the data table. */
+ } arm_bilinear_interp_instance_q7;
+
+
+ /**
+ * @brief Q7 vector multiplication.
+ * @param[in] pSrcA points to the first input vector
+ * @param[in] pSrcB points to the second input vector
+ * @param[out] pDst points to the output vector
+ * @param[in] blockSize number of samples in each vector
+ */
+ void arm_mult_q7(
+ const q7_t * pSrcA,
+ const q7_t * pSrcB,
+ q7_t * pDst,
+ uint32_t blockSize);
+
+
+ /**
+ * @brief Q15 vector multiplication.
+ * @param[in] pSrcA points to the first input vector
+ * @param[in] pSrcB points to the second input vector
+ * @param[out] pDst points to the output vector
+ * @param[in] blockSize number of samples in each vector
+ */
+ void arm_mult_q15(
+ const q15_t * pSrcA,
+ const q15_t * pSrcB,
+ q15_t * pDst,
+ uint32_t blockSize);
+
+
+ /**
+ * @brief Q31 vector multiplication.
+ * @param[in] pSrcA points to the first input vector
+ * @param[in] pSrcB points to the second input vector
+ * @param[out] pDst points to the output vector
+ * @param[in] blockSize number of samples in each vector
+ */
+ void arm_mult_q31(
+ const q31_t * pSrcA,
+ const q31_t * pSrcB,
+ q31_t * pDst,
+ uint32_t blockSize);
+
+
+ /**
+ * @brief Floating-point vector multiplication.
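+ *
+ * Element-wise multiply; a minimal sketch (the buffer names and the length of 8 are
+ * illustrative assumptions):
+ * @code
+ * float32_t a[8], b[8], prod[8];             // a[] and b[] assumed already filled
+ * arm_mult_f32(a, b, prod, 8);               // prod[n] = a[n] * b[n] for n = 0..7
+ * @endcode
+ *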
+ * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_mult_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Instance structure for the Q15 CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ + uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ + const q15_t *pTwiddle; /**< points to the Sin twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ + } arm_cfft_radix2_instance_q15; + +/* Deprecated */ + arm_status arm_cfft_radix2_init_q15( + arm_cfft_radix2_instance_q15 * S, + uint16_t fftLen, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + +/* Deprecated */ + void arm_cfft_radix2_q15( + const arm_cfft_radix2_instance_q15 * S, + q15_t * pSrc); + + + /** + * @brief Instance structure for the Q15 CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ + uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ + const q15_t *pTwiddle; /**< points to the twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ + } arm_cfft_radix4_instance_q15; + +/* Deprecated */ + arm_status arm_cfft_radix4_init_q15( + arm_cfft_radix4_instance_q15 * S, + uint16_t fftLen, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + +/* Deprecated */ + void arm_cfft_radix4_q15( + const arm_cfft_radix4_instance_q15 * S, + q15_t * pSrc); + + /** + * @brief Instance structure for the Radix-2 Q31 CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ + uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ + const q31_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. 
*/ + } arm_cfft_radix2_instance_q31; + +/* Deprecated */ + arm_status arm_cfft_radix2_init_q31( + arm_cfft_radix2_instance_q31 * S, + uint16_t fftLen, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + +/* Deprecated */ + void arm_cfft_radix2_q31( + const arm_cfft_radix2_instance_q31 * S, + q31_t * pSrc); + + /** + * @brief Instance structure for the Q31 CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ + uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ + const q31_t *pTwiddle; /**< points to the twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ + } arm_cfft_radix4_instance_q31; + +/* Deprecated */ + void arm_cfft_radix4_q31( + const arm_cfft_radix4_instance_q31 * S, + q31_t * pSrc); + +/* Deprecated */ + arm_status arm_cfft_radix4_init_q31( + arm_cfft_radix4_instance_q31 * S, + uint16_t fftLen, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + + /** + * @brief Instance structure for the floating-point CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ + uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ + const float32_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ + float32_t onebyfftLen; /**< value of 1/fftLen. */ + } arm_cfft_radix2_instance_f32; + +/* Deprecated */ + arm_status arm_cfft_radix2_init_f32( + arm_cfft_radix2_instance_f32 * S, + uint16_t fftLen, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + +/* Deprecated */ + void arm_cfft_radix2_f32( + const arm_cfft_radix2_instance_f32 * S, + float32_t * pSrc); + + /** + * @brief Instance structure for the floating-point CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */ + uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */ + const float32_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */ + float32_t onebyfftLen; /**< value of 1/fftLen. 
*/ + } arm_cfft_radix4_instance_f32; + +/* Deprecated */ + arm_status arm_cfft_radix4_init_f32( + arm_cfft_radix4_instance_f32 * S, + uint16_t fftLen, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + +/* Deprecated */ + void arm_cfft_radix4_f32( + const arm_cfft_radix4_instance_f32 * S, + float32_t * pSrc); + + /** + * @brief Instance structure for the fixed-point CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + const q15_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t bitRevLength; /**< bit reversal table length. */ +#if defined(ARM_MATH_MVEI) + const uint32_t *rearranged_twiddle_tab_stride1_arr; /**< Per stage reordered twiddle pointer (offset 1) */ \ + const uint32_t *rearranged_twiddle_tab_stride2_arr; /**< Per stage reordered twiddle pointer (offset 2) */ \ + const uint32_t *rearranged_twiddle_tab_stride3_arr; /**< Per stage reordered twiddle pointer (offset 3) */ \ + const q15_t *rearranged_twiddle_stride1; /**< reordered twiddle offset 1 storage */ \ + const q15_t *rearranged_twiddle_stride2; /**< reordered twiddle offset 2 storage */ \ + const q15_t *rearranged_twiddle_stride3; +#endif + } arm_cfft_instance_q15; + +arm_status arm_cfft_init_q15( + arm_cfft_instance_q15 * S, + uint16_t fftLen); + +void arm_cfft_q15( + const arm_cfft_instance_q15 * S, + q15_t * p1, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + + /** + * @brief Instance structure for the fixed-point CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + const q31_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t bitRevLength; /**< bit reversal table length. */ +#if defined(ARM_MATH_MVEI) + const uint32_t *rearranged_twiddle_tab_stride1_arr; /**< Per stage reordered twiddle pointer (offset 1) */ \ + const uint32_t *rearranged_twiddle_tab_stride2_arr; /**< Per stage reordered twiddle pointer (offset 2) */ \ + const uint32_t *rearranged_twiddle_tab_stride3_arr; /**< Per stage reordered twiddle pointer (offset 3) */ \ + const q31_t *rearranged_twiddle_stride1; /**< reordered twiddle offset 1 storage */ \ + const q31_t *rearranged_twiddle_stride2; /**< reordered twiddle offset 2 storage */ \ + const q31_t *rearranged_twiddle_stride3; +#endif + } arm_cfft_instance_q31; + +arm_status arm_cfft_init_q31( + arm_cfft_instance_q31 * S, + uint16_t fftLen); + +void arm_cfft_q31( + const arm_cfft_instance_q31 * S, + q31_t * p1, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + + /** + * @brief Instance structure for the floating-point CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + const float32_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t bitRevLength; /**< bit reversal table length. 
*/ +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + const uint32_t *rearranged_twiddle_tab_stride1_arr; /**< Per stage reordered twiddle pointer (offset 1) */ \ + const uint32_t *rearranged_twiddle_tab_stride2_arr; /**< Per stage reordered twiddle pointer (offset 2) */ \ + const uint32_t *rearranged_twiddle_tab_stride3_arr; /**< Per stage reordered twiddle pointer (offset 3) */ \ + const float32_t *rearranged_twiddle_stride1; /**< reordered twiddle offset 1 storage */ \ + const float32_t *rearranged_twiddle_stride2; /**< reordered twiddle offset 2 storage */ \ + const float32_t *rearranged_twiddle_stride3; +#endif + } arm_cfft_instance_f32; + + + arm_status arm_cfft_init_f32( + arm_cfft_instance_f32 * S, + uint16_t fftLen); + + void arm_cfft_f32( + const arm_cfft_instance_f32 * S, + float32_t * p1, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + + + /** + * @brief Instance structure for the Double Precision Floating-point CFFT/CIFFT function. + */ + typedef struct + { + uint16_t fftLen; /**< length of the FFT. */ + const float64_t *pTwiddle; /**< points to the Twiddle factor table. */ + const uint16_t *pBitRevTable; /**< points to the bit reversal table. */ + uint16_t bitRevLength; /**< bit reversal table length. */ + } arm_cfft_instance_f64; + + void arm_cfft_f64( + const arm_cfft_instance_f64 * S, + float64_t * p1, + uint8_t ifftFlag, + uint8_t bitReverseFlag); + + /** + * @brief Instance structure for the Q15 RFFT/RIFFT function. + */ + typedef struct + { + uint32_t fftLenReal; /**< length of the real FFT. */ + uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */ + uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */ + uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + const q15_t *pTwiddleAReal; /**< points to the real twiddle factor table. */ + const q15_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */ +#if defined(ARM_MATH_MVEI) + arm_cfft_instance_q15 cfftInst; +#else + const arm_cfft_instance_q15 *pCfft; /**< points to the complex FFT instance. */ +#endif + } arm_rfft_instance_q15; + + arm_status arm_rfft_init_q15( + arm_rfft_instance_q15 * S, + uint32_t fftLenReal, + uint32_t ifftFlagR, + uint32_t bitReverseFlag); + + void arm_rfft_q15( + const arm_rfft_instance_q15 * S, + q15_t * pSrc, + q15_t * pDst); + + /** + * @brief Instance structure for the Q31 RFFT/RIFFT function. + */ + typedef struct + { + uint32_t fftLenReal; /**< length of the real FFT. */ + uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */ + uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */ + uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + const q31_t *pTwiddleAReal; /**< points to the real twiddle factor table. */ + const q31_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */ +#if defined(ARM_MATH_MVEI) + arm_cfft_instance_q31 cfftInst; +#else + const arm_cfft_instance_q31 *pCfft; /**< points to the complex FFT instance. 
*/ +#endif + } arm_rfft_instance_q31; + + arm_status arm_rfft_init_q31( + arm_rfft_instance_q31 * S, + uint32_t fftLenReal, + uint32_t ifftFlagR, + uint32_t bitReverseFlag); + + void arm_rfft_q31( + const arm_rfft_instance_q31 * S, + q31_t * pSrc, + q31_t * pDst); + + /** + * @brief Instance structure for the floating-point RFFT/RIFFT function. + */ + typedef struct + { + uint32_t fftLenReal; /**< length of the real FFT. */ + uint16_t fftLenBy2; /**< length of the complex FFT. */ + uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */ + uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */ + uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */ + const float32_t *pTwiddleAReal; /**< points to the real twiddle factor table. */ + const float32_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */ + arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */ + } arm_rfft_instance_f32; + + arm_status arm_rfft_init_f32( + arm_rfft_instance_f32 * S, + arm_cfft_radix4_instance_f32 * S_CFFT, + uint32_t fftLenReal, + uint32_t ifftFlagR, + uint32_t bitReverseFlag); + + void arm_rfft_f32( + const arm_rfft_instance_f32 * S, + float32_t * pSrc, + float32_t * pDst); + + /** + * @brief Instance structure for the Double Precision Floating-point RFFT/RIFFT function. + */ +typedef struct + { + arm_cfft_instance_f64 Sint; /**< Internal CFFT structure. */ + uint16_t fftLenRFFT; /**< length of the real sequence */ + const float64_t * pTwiddleRFFT; /**< Twiddle factors real stage */ + } arm_rfft_fast_instance_f64 ; + +arm_status arm_rfft_fast_init_f64 ( + arm_rfft_fast_instance_f64 * S, + uint16_t fftLen); + + +void arm_rfft_fast_f64( + arm_rfft_fast_instance_f64 * S, + float64_t * p, float64_t * pOut, + uint8_t ifftFlag); + + + /** + * @brief Instance structure for the floating-point RFFT/RIFFT function. + */ +typedef struct + { + arm_cfft_instance_f32 Sint; /**< Internal CFFT structure. */ + uint16_t fftLenRFFT; /**< length of the real sequence */ + const float32_t * pTwiddleRFFT; /**< Twiddle factors real stage */ + } arm_rfft_fast_instance_f32 ; + +arm_status arm_rfft_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S, + uint16_t fftLen); + + + void arm_rfft_fast_f32( + const arm_rfft_fast_instance_f32 * S, + float32_t * p, float32_t * pOut, + uint8_t ifftFlag); + + /** + * @brief Instance structure for the floating-point DCT4/IDCT4 function. + */ + typedef struct + { + uint16_t N; /**< length of the DCT4. */ + uint16_t Nby2; /**< half of the length of the DCT4. */ + float32_t normalize; /**< normalizing factor. */ + const float32_t *pTwiddle; /**< points to the twiddle factor table. */ + const float32_t *pCosFactor; /**< points to the cosFactor table. */ + arm_rfft_instance_f32 *pRfft; /**< points to the real FFT instance. */ + arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */ + } arm_dct4_instance_f32; + + + /** + * @brief Initialization function for the floating-point DCT4/IDCT4. + * @param[in,out] S points to an instance of floating-point DCT4/IDCT4 structure. + * @param[in] S_RFFT points to an instance of floating-point RFFT/RIFFT structure. + * @param[in] S_CFFT points to an instance of floating-point CFFT/CIFFT structure. + * @param[in] N length of the DCT4. + * @param[in] Nby2 half of the length of the DCT4. 
+ * @param[in] normalize normalizing factor. + * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if fftLenReal is not a supported transform length. + */ + arm_status arm_dct4_init_f32( + arm_dct4_instance_f32 * S, + arm_rfft_instance_f32 * S_RFFT, + arm_cfft_radix4_instance_f32 * S_CFFT, + uint16_t N, + uint16_t Nby2, + float32_t normalize); + + + /** + * @brief Processing function for the floating-point DCT4/IDCT4. + * @param[in] S points to an instance of the floating-point DCT4/IDCT4 structure. + * @param[in] pState points to state buffer. + * @param[in,out] pInlineBuffer points to the in-place input and output buffer. + */ + void arm_dct4_f32( + const arm_dct4_instance_f32 * S, + float32_t * pState, + float32_t * pInlineBuffer); + + + /** + * @brief Instance structure for the Q31 DCT4/IDCT4 function. + */ + typedef struct + { + uint16_t N; /**< length of the DCT4. */ + uint16_t Nby2; /**< half of the length of the DCT4. */ + q31_t normalize; /**< normalizing factor. */ + const q31_t *pTwiddle; /**< points to the twiddle factor table. */ + const q31_t *pCosFactor; /**< points to the cosFactor table. */ + arm_rfft_instance_q31 *pRfft; /**< points to the real FFT instance. */ + arm_cfft_radix4_instance_q31 *pCfft; /**< points to the complex FFT instance. */ + } arm_dct4_instance_q31; + + + /** + * @brief Initialization function for the Q31 DCT4/IDCT4. + * @param[in,out] S points to an instance of Q31 DCT4/IDCT4 structure. + * @param[in] S_RFFT points to an instance of Q31 RFFT/RIFFT structure + * @param[in] S_CFFT points to an instance of Q31 CFFT/CIFFT structure + * @param[in] N length of the DCT4. + * @param[in] Nby2 half of the length of the DCT4. + * @param[in] normalize normalizing factor. + * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if N is not a supported transform length. + */ + arm_status arm_dct4_init_q31( + arm_dct4_instance_q31 * S, + arm_rfft_instance_q31 * S_RFFT, + arm_cfft_radix4_instance_q31 * S_CFFT, + uint16_t N, + uint16_t Nby2, + q31_t normalize); + + + /** + * @brief Processing function for the Q31 DCT4/IDCT4. + * @param[in] S points to an instance of the Q31 DCT4 structure. + * @param[in] pState points to state buffer. + * @param[in,out] pInlineBuffer points to the in-place input and output buffer. + */ + void arm_dct4_q31( + const arm_dct4_instance_q31 * S, + q31_t * pState, + q31_t * pInlineBuffer); + + + /** + * @brief Instance structure for the Q15 DCT4/IDCT4 function. + */ + typedef struct + { + uint16_t N; /**< length of the DCT4. */ + uint16_t Nby2; /**< half of the length of the DCT4. */ + q15_t normalize; /**< normalizing factor. */ + const q15_t *pTwiddle; /**< points to the twiddle factor table. */ + const q15_t *pCosFactor; /**< points to the cosFactor table. */ + arm_rfft_instance_q15 *pRfft; /**< points to the real FFT instance. */ + arm_cfft_radix4_instance_q15 *pCfft; /**< points to the complex FFT instance. */ + } arm_dct4_instance_q15; + + + /** + * @brief Initialization function for the Q15 DCT4/IDCT4. + * @param[in,out] S points to an instance of Q15 DCT4/IDCT4 structure. + * @param[in] S_RFFT points to an instance of Q15 RFFT/RIFFT structure. + * @param[in] S_CFFT points to an instance of Q15 CFFT/CIFFT structure. + * @param[in] N length of the DCT4. + * @param[in] Nby2 half of the length of the DCT4. + * @param[in] normalize normalizing factor. 
+ * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if N is not a supported transform length. + */ + arm_status arm_dct4_init_q15( + arm_dct4_instance_q15 * S, + arm_rfft_instance_q15 * S_RFFT, + arm_cfft_radix4_instance_q15 * S_CFFT, + uint16_t N, + uint16_t Nby2, + q15_t normalize); + + + /** + * @brief Processing function for the Q15 DCT4/IDCT4. + * @param[in] S points to an instance of the Q15 DCT4 structure. + * @param[in] pState points to state buffer. + * @param[in,out] pInlineBuffer points to the in-place input and output buffer. + */ + void arm_dct4_q15( + const arm_dct4_instance_q15 * S, + q15_t * pState, + q15_t * pInlineBuffer); + + + /** + * @brief Floating-point vector addition. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_add_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q7 vector addition. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_add_q7( + const q7_t * pSrcA, + const q7_t * pSrcB, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q15 vector addition. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_add_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q31 vector addition. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_add_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Floating-point vector subtraction. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_sub_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q7 vector subtraction. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_sub_q7( + const q7_t * pSrcA, + const q7_t * pSrcB, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q15 vector subtraction. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_sub_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q31 vector subtraction. 
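+ *
+ * A minimal sketch (the buffer names and the length of 16 are illustrative assumptions):
+ * @code
+ * q31_t x[16], y[16], diff[16];              // x[] and y[] assumed already filled
+ * arm_sub_q31(x, y, diff, 16);               // diff[n] = x[n] - y[n], saturated to the Q31 range
+ * @endcode
+ *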
+ * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_sub_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Multiplies a floating-point vector by a scalar. + * @param[in] pSrc points to the input vector + * @param[in] scale scale factor to be applied + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_scale_f32( + const float32_t * pSrc, + float32_t scale, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Multiplies a Q7 vector by a scalar. + * @param[in] pSrc points to the input vector + * @param[in] scaleFract fractional portion of the scale value + * @param[in] shift number of bits to shift the result by + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_scale_q7( + const q7_t * pSrc, + q7_t scaleFract, + int8_t shift, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Multiplies a Q15 vector by a scalar. + * @param[in] pSrc points to the input vector + * @param[in] scaleFract fractional portion of the scale value + * @param[in] shift number of bits to shift the result by + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_scale_q15( + const q15_t * pSrc, + q15_t scaleFract, + int8_t shift, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Multiplies a Q31 vector by a scalar. + * @param[in] pSrc points to the input vector + * @param[in] scaleFract fractional portion of the scale value + * @param[in] shift number of bits to shift the result by + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_scale_q31( + const q31_t * pSrc, + q31_t scaleFract, + int8_t shift, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q7 vector absolute value. + * @param[in] pSrc points to the input buffer + * @param[out] pDst points to the output buffer + * @param[in] blockSize number of samples in each vector + */ + void arm_abs_q7( + const q7_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Floating-point vector absolute value. + * @param[in] pSrc points to the input buffer + * @param[out] pDst points to the output buffer + * @param[in] blockSize number of samples in each vector + */ + void arm_abs_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q15 vector absolute value. + * @param[in] pSrc points to the input buffer + * @param[out] pDst points to the output buffer + * @param[in] blockSize number of samples in each vector + */ + void arm_abs_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Q31 vector absolute value. + * @param[in] pSrc points to the input buffer + * @param[out] pDst points to the output buffer + * @param[in] blockSize number of samples in each vector + */ + void arm_abs_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Dot product of floating-point vectors. 
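+ *
+ * A minimal sketch (the buffer names and the length of 32 are illustrative assumptions):
+ * @code
+ * float32_t a[32], b[32];                    // assumed already filled
+ * float32_t result;
+ * arm_dot_prod_f32(a, b, 32, &result);       // result = sum over n of a[n] * b[n]
+ * @endcode
+ *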
+ * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] blockSize number of samples in each vector + * @param[out] result output result returned here + */ + void arm_dot_prod_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t blockSize, + float32_t * result); + + + /** + * @brief Dot product of Q7 vectors. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] blockSize number of samples in each vector + * @param[out] result output result returned here + */ + void arm_dot_prod_q7( + const q7_t * pSrcA, + const q7_t * pSrcB, + uint32_t blockSize, + q31_t * result); + + + /** + * @brief Dot product of Q15 vectors. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] blockSize number of samples in each vector + * @param[out] result output result returned here + */ + void arm_dot_prod_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + uint32_t blockSize, + q63_t * result); + + + /** + * @brief Dot product of Q31 vectors. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] blockSize number of samples in each vector + * @param[out] result output result returned here + */ + void arm_dot_prod_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + uint32_t blockSize, + q63_t * result); + + + /** + * @brief Shifts the elements of a Q7 vector a specified number of bits. + * @param[in] pSrc points to the input vector + * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right. + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_shift_q7( + const q7_t * pSrc, + int8_t shiftBits, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Shifts the elements of a Q15 vector a specified number of bits. + * @param[in] pSrc points to the input vector + * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right. + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_shift_q15( + const q15_t * pSrc, + int8_t shiftBits, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Shifts the elements of a Q31 vector a specified number of bits. + * @param[in] pSrc points to the input vector + * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right. + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_shift_q31( + const q31_t * pSrc, + int8_t shiftBits, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Adds a constant offset to a floating-point vector. + * @param[in] pSrc points to the input vector + * @param[in] offset is the offset to be added + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_offset_f32( + const float32_t * pSrc, + float32_t offset, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Adds a constant offset to a Q7 vector. 
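+ *
+ * A minimal sketch (the buffer names, the offset of 10 and the length of 64 are
+ * illustrative assumptions):
+ * @code
+ * q7_t samples[64], biased[64];              // samples[] assumed already filled
+ * arm_offset_q7(samples, (q7_t)10, biased, 64);   // biased[n] = samples[n] + 10, saturated to the Q7 range
+ * @endcode
+ *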
+ * @param[in] pSrc points to the input vector + * @param[in] offset is the offset to be added + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_offset_q7( + const q7_t * pSrc, + q7_t offset, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Adds a constant offset to a Q15 vector. + * @param[in] pSrc points to the input vector + * @param[in] offset is the offset to be added + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_offset_q15( + const q15_t * pSrc, + q15_t offset, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Adds a constant offset to a Q31 vector. + * @param[in] pSrc points to the input vector + * @param[in] offset is the offset to be added + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_offset_q31( + const q31_t * pSrc, + q31_t offset, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Negates the elements of a floating-point vector. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_negate_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Negates the elements of a Q7 vector. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_negate_q7( + const q7_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Negates the elements of a Q15 vector. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_negate_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Negates the elements of a Q31 vector. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_negate_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Copies the elements of a floating-point vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_copy_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Copies the elements of a Q7 vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_copy_q7( + const q7_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Copies the elements of a Q15 vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_copy_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Copies the elements of a Q31 vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_copy_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Fills a constant value into a floating-point vector. 
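+ *
+ * A minimal sketch (the buffer name and the length of 256 are illustrative assumptions):
+ * @code
+ * float32_t window[256];
+ * arm_fill_f32(0.0f, window, 256);           // set all 256 samples to 0.0f
+ * @endcode
+ *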
+ * @param[in] value input value to be filled + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_fill_f32( + float32_t value, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Fills a constant value into a Q7 vector. + * @param[in] value input value to be filled + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_fill_q7( + q7_t value, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Fills a constant value into a Q15 vector. + * @param[in] value input value to be filled + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_fill_q15( + q15_t value, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Fills a constant value into a Q31 vector. + * @param[in] value input value to be filled + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_fill_q31( + q31_t value, + q31_t * pDst, + uint32_t blockSize); + + +/** + * @brief Convolution of floating-point sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the location where the output result is written. Length srcALen+srcBLen-1. + */ + void arm_conv_f32( + const float32_t * pSrcA, + uint32_t srcALen, + const float32_t * pSrcB, + uint32_t srcBLen, + float32_t * pDst); + + + /** + * @brief Convolution of Q15 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). + */ + void arm_conv_opt_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + q15_t * pScratch1, + q15_t * pScratch2); + + +/** + * @brief Convolution of Q15 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the location where the output result is written. Length srcALen+srcBLen-1. + */ + void arm_conv_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst); + + + /** + * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + */ + void arm_conv_fast_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst); + + + /** + * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input sequence. 
+ * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). + */ + void arm_conv_fast_opt_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + q15_t * pScratch1, + q15_t * pScratch2); + + + /** + * @brief Convolution of Q31 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + */ + void arm_conv_q31( + const q31_t * pSrcA, + uint32_t srcALen, + const q31_t * pSrcB, + uint32_t srcBLen, + q31_t * pDst); + + + /** + * @brief Convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + */ + void arm_conv_fast_q31( + const q31_t * pSrcA, + uint32_t srcALen, + const q31_t * pSrcB, + uint32_t srcBLen, + q31_t * pDst); + + + /** + * @brief Convolution of Q7 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + * @param[in] pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen). + */ + void arm_conv_opt_q7( + const q7_t * pSrcA, + uint32_t srcALen, + const q7_t * pSrcB, + uint32_t srcBLen, + q7_t * pDst, + q15_t * pScratch1, + q15_t * pScratch2); + + + /** + * @brief Convolution of Q7 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length srcALen+srcBLen-1. + */ + void arm_conv_q7( + const q7_t * pSrcA, + uint32_t srcALen, + const q7_t * pSrcB, + uint32_t srcBLen, + q7_t * pDst); + + + /** + * @brief Partial convolution of floating-point sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. 
+ * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_f32( + const float32_t * pSrcA, + uint32_t srcALen, + const float32_t * pSrcB, + uint32_t srcBLen, + float32_t * pDst, + uint32_t firstIndex, + uint32_t numPoints); + + + /** + * @brief Partial convolution of Q15 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_opt_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + uint32_t firstIndex, + uint32_t numPoints, + q15_t * pScratch1, + q15_t * pScratch2); + + + /** + * @brief Partial convolution of Q15 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + uint32_t firstIndex, + uint32_t numPoints); + + + /** + * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_fast_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + uint32_t firstIndex, + uint32_t numPoints); + + + /** + * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. 
+ * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @param[in] pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer of size min(srcALen, srcBLen). + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_fast_opt_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + uint32_t firstIndex, + uint32_t numPoints, + q15_t * pScratch1, + q15_t * pScratch2); + + + /** + * @brief Partial convolution of Q31 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_q31( + const q31_t * pSrcA, + uint32_t srcALen, + const q31_t * pSrcB, + uint32_t srcBLen, + q31_t * pDst, + uint32_t firstIndex, + uint32_t numPoints); + + + /** + * @brief Partial convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4 + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_fast_q31( + const q31_t * pSrcA, + uint32_t srcALen, + const q31_t * pSrcB, + uint32_t srcBLen, + q31_t * pDst, + uint32_t firstIndex, + uint32_t numPoints); + + + /** + * @brief Partial convolution of Q7 sequences + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @param[in] pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen). + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. 
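+ *
+ * A sizing sketch for the scratch buffers (the sequence lengths 32 and 8 and the
+ * requested output range are illustrative assumptions):
+ * @code
+ * q7_t  a[32], b[8];                         // input sequences, assumed already filled
+ * q7_t  y[32 + 8 - 1];                       // full-length output buffer
+ * q15_t scratch1[32 + 2*8 - 2];              // max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2
+ * q15_t scratch2[8];                         // min(srcALen, srcBLen)
+ * arm_status st = arm_conv_partial_opt_q7(a, 32, b, 8, y, 8, 16, scratch1, scratch2);
+ * // on success, output samples y[8] .. y[8 + 16 - 1] have been computed
+ * @endcode
+ *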
+ */ + arm_status arm_conv_partial_opt_q7( + const q7_t * pSrcA, + uint32_t srcALen, + const q7_t * pSrcB, + uint32_t srcBLen, + q7_t * pDst, + uint32_t firstIndex, + uint32_t numPoints, + q15_t * pScratch1, + q15_t * pScratch2); + + +/** + * @brief Partial convolution of Q7 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data + * @param[in] firstIndex is the first output sample to start with. + * @param[in] numPoints is the number of output points to be computed. + * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2]. + */ + arm_status arm_conv_partial_q7( + const q7_t * pSrcA, + uint32_t srcALen, + const q7_t * pSrcB, + uint32_t srcBLen, + q7_t * pDst, + uint32_t firstIndex, + uint32_t numPoints); + + + /** + * @brief Instance structure for the Q15 FIR decimator. + */ + typedef struct + { + uint8_t M; /**< decimation factor. */ + uint16_t numTaps; /**< number of coefficients in the filter. */ + const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + } arm_fir_decimate_instance_q15; + + /** + * @brief Instance structure for the Q31 FIR decimator. + */ + typedef struct + { + uint8_t M; /**< decimation factor. */ + uint16_t numTaps; /**< number of coefficients in the filter. */ + const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + } arm_fir_decimate_instance_q31; + +/** + @brief Instance structure for floating-point FIR decimator. + */ +typedef struct + { + uint8_t M; /**< decimation factor. */ + uint16_t numTaps; /**< number of coefficients in the filter. */ + const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + } arm_fir_decimate_instance_f32; + + +/** + @brief Processing function for floating-point FIR decimator. + @param[in] S points to an instance of the floating-point FIR decimator structure + @param[in] pSrc points to the block of input data + @param[out] pDst points to the block of output data + @param[in] blockSize number of samples to process + */ +void arm_fir_decimate_f32( + const arm_fir_decimate_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + +/** + @brief Initialization function for the floating-point FIR decimator. 
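+
+  A minimal setup/processing sketch (the tap count of 16, decimation factor of 4,
+  block size of 64 and the buffer names are illustrative assumptions):
+  @code
+  float32_t coeffs[16];                      // low-pass coefficients, assumed designed elsewhere
+  float32_t state[16 + 64 - 1];              // numTaps + blockSize - 1
+  float32_t input[64], output[64 / 4];
+  arm_fir_decimate_instance_f32 S;
+  if (arm_fir_decimate_init_f32(&S, 16, 4, coeffs, state, 64) == ARM_MATH_SUCCESS)
+  {
+    arm_fir_decimate_f32(&S, input, output, 64);   // writes 64 / 4 = 16 output samples
+  }
+  @endcode
+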
+ @param[in,out] S points to an instance of the floating-point FIR decimator structure + @param[in] numTaps number of coefficients in the filter + @param[in] M decimation factor + @param[in] pCoeffs points to the filter coefficients + @param[in] pState points to the state buffer + @param[in] blockSize number of input samples to process per call + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_LENGTH_ERROR : blockSize is not a multiple of M + */ +arm_status arm_fir_decimate_init_f32( + arm_fir_decimate_instance_f32 * S, + uint16_t numTaps, + uint8_t M, + const float32_t * pCoeffs, + float32_t * pState, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q15 FIR decimator. + * @param[in] S points to an instance of the Q15 FIR decimator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_decimate_q15( + const arm_fir_decimate_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q15 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4. + * @param[in] S points to an instance of the Q15 FIR decimator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_decimate_fast_q15( + const arm_fir_decimate_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q15 FIR decimator. + * @param[in,out] S points to an instance of the Q15 FIR decimator structure. + * @param[in] numTaps number of coefficients in the filter. + * @param[in] M decimation factor. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of input samples to process per call. + * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if + * blockSize is not a multiple of M. + */ + arm_status arm_fir_decimate_init_q15( + arm_fir_decimate_instance_q15 * S, + uint16_t numTaps, + uint8_t M, + const q15_t * pCoeffs, + q15_t * pState, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q31 FIR decimator. + * @param[in] S points to an instance of the Q31 FIR decimator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_decimate_q31( + const arm_fir_decimate_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + /** + * @brief Processing function for the Q31 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4. + * @param[in] S points to an instance of the Q31 FIR decimator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_decimate_fast_q31( + const arm_fir_decimate_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q31 FIR decimator. + * @param[in,out] S points to an instance of the Q31 FIR decimator structure. 
+ * @param[in] numTaps number of coefficients in the filter. + * @param[in] M decimation factor. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of input samples to process per call. + * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if + * blockSize is not a multiple of M. + */ + arm_status arm_fir_decimate_init_q31( + arm_fir_decimate_instance_q31 * S, + uint16_t numTaps, + uint8_t M, + const q31_t * pCoeffs, + q31_t * pState, + uint32_t blockSize); + + + /** + * @brief Instance structure for the Q15 FIR interpolator. + */ + typedef struct + { + uint8_t L; /**< upsample factor. */ + uint16_t phaseLength; /**< length of each polyphase filter component. */ + const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */ + q15_t *pState; /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */ + } arm_fir_interpolate_instance_q15; + + /** + * @brief Instance structure for the Q31 FIR interpolator. + */ + typedef struct + { + uint8_t L; /**< upsample factor. */ + uint16_t phaseLength; /**< length of each polyphase filter component. */ + const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */ + q31_t *pState; /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */ + } arm_fir_interpolate_instance_q31; + + /** + * @brief Instance structure for the floating-point FIR interpolator. + */ + typedef struct + { + uint8_t L; /**< upsample factor. */ + uint16_t phaseLength; /**< length of each polyphase filter component. */ + const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */ + float32_t *pState; /**< points to the state variable array. The array is of length phaseLength+numTaps-1. */ + } arm_fir_interpolate_instance_f32; + + + /** + * @brief Processing function for the Q15 FIR interpolator. + * @param[in] S points to an instance of the Q15 FIR interpolator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_interpolate_q15( + const arm_fir_interpolate_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q15 FIR interpolator. + * @param[in,out] S points to an instance of the Q15 FIR interpolator structure. + * @param[in] L upsample factor. + * @param[in] numTaps number of filter coefficients in the filter. + * @param[in] pCoeffs points to the filter coefficient buffer. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of input samples to process per call. + * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if + * the filter length numTaps is not a multiple of the interpolation factor L. + */ + arm_status arm_fir_interpolate_init_q15( + arm_fir_interpolate_instance_q15 * S, + uint8_t L, + uint16_t numTaps, + const q15_t * pCoeffs, + q15_t * pState, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q31 FIR interpolator. + * @param[in] S points to an instance of the Q15 FIR interpolator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. 
+ * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_interpolate_q31( + const arm_fir_interpolate_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q31 FIR interpolator. + * @param[in,out] S points to an instance of the Q31 FIR interpolator structure. + * @param[in] L upsample factor. + * @param[in] numTaps number of filter coefficients in the filter. + * @param[in] pCoeffs points to the filter coefficient buffer. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of input samples to process per call. + * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if + * the filter length numTaps is not a multiple of the interpolation factor L. + */ + arm_status arm_fir_interpolate_init_q31( + arm_fir_interpolate_instance_q31 * S, + uint8_t L, + uint16_t numTaps, + const q31_t * pCoeffs, + q31_t * pState, + uint32_t blockSize); + + + /** + * @brief Processing function for the floating-point FIR interpolator. + * @param[in] S points to an instance of the floating-point FIR interpolator structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_interpolate_f32( + const arm_fir_interpolate_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the floating-point FIR interpolator. + * @param[in,out] S points to an instance of the floating-point FIR interpolator structure. + * @param[in] L upsample factor. + * @param[in] numTaps number of filter coefficients in the filter. + * @param[in] pCoeffs points to the filter coefficient buffer. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of input samples to process per call. + * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if + * the filter length numTaps is not a multiple of the interpolation factor L. + */ + arm_status arm_fir_interpolate_init_f32( + arm_fir_interpolate_instance_f32 * S, + uint8_t L, + uint16_t numTaps, + const float32_t * pCoeffs, + float32_t * pState, + uint32_t blockSize); + + + /** + * @brief Instance structure for the high precision Q31 Biquad cascade filter. + */ + typedef struct + { + uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + q63_t *pState; /**< points to the array of state coefficients. The array is of length 4*numStages. */ + const q31_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ + uint8_t postShift; /**< additional shift, in bits, applied to each output sample. */ + } arm_biquad_cas_df1_32x64_ins_q31; + + + /** + * @param[in] S points to an instance of the high precision Q31 Biquad cascade filter structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cas_df1_32x64_q31( + const arm_biquad_cas_df1_32x64_ins_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @param[in,out] S points to an instance of the high precision Q31 Biquad cascade filter structure. + * @param[in] numStages number of 2nd order stages in the filter. 
+ * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] postShift shift to be applied to the output. Varies according to the coefficients format + */ + void arm_biquad_cas_df1_32x64_init_q31( + arm_biquad_cas_df1_32x64_ins_q31 * S, + uint8_t numStages, + const q31_t * pCoeffs, + q63_t * pState, + uint8_t postShift); + + + /** + * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter. + */ + typedef struct + { + uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + float32_t *pState; /**< points to the array of state coefficients. The array is of length 2*numStages. */ + const float32_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ + } arm_biquad_cascade_df2T_instance_f32; + + /** + * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter. + */ + typedef struct + { + uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + float32_t *pState; /**< points to the array of state coefficients. The array is of length 4*numStages. */ + const float32_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ + } arm_biquad_cascade_stereo_df2T_instance_f32; + + /** + * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter. + */ + typedef struct + { + uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */ + float64_t *pState; /**< points to the array of state coefficients. The array is of length 2*numStages. */ + const float64_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */ + } arm_biquad_cascade_df2T_instance_f64; + + + /** + * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. + * @param[in] S points to an instance of the filter data structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_df2T_f32( + const arm_biquad_cascade_df2T_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. 2 channels + * @param[in] S points to an instance of the filter data structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. + */ + void arm_biquad_cascade_stereo_df2T_f32( + const arm_biquad_cascade_stereo_df2T_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. + * @param[in] S points to an instance of the filter data structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. 
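+   * \par Example
+   * A minimal sketch of driving the cascade; the two-stage pass-through coefficient set and the block
+   * size are placeholders chosen only for illustration (5 coefficients per stage, {b0, b1, b2, a1, a2}).
+   * <pre>
+   *     #define NUM_STAGES  2
+   *     #define BLOCK_SIZE  64
+   *     static const float64_t coeffs[5 * NUM_STAGES] = { 1.0, 0.0, 0.0, 0.0, 0.0,     // stage 1: pass-through placeholder
+   *                                                       1.0, 0.0, 0.0, 0.0, 0.0 };   // stage 2: pass-through placeholder
+   *     static float64_t state[2 * NUM_STAGES];
+   *     static float64_t in[BLOCK_SIZE], out[BLOCK_SIZE];
+   *     arm_biquad_cascade_df2T_instance_f64 S;
+   *
+   *     arm_biquad_cascade_df2T_init_f64(&S, NUM_STAGES, coeffs, state);
+   *     arm_biquad_cascade_df2T_f64(&S, in, out, BLOCK_SIZE);
+   * </pre>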
+ */ + void arm_biquad_cascade_df2T_f64( + const arm_biquad_cascade_df2T_instance_f64 * S, + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + + +#if defined(ARM_MATH_NEON) +void arm_biquad_cascade_df2T_compute_coefs_f32( + arm_biquad_cascade_df2T_instance_f32 * S, + uint8_t numStages, + float32_t * pCoeffs); +#endif + /** + * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. + * @param[in,out] S points to an instance of the filter data structure. + * @param[in] numStages number of 2nd order stages in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + */ + void arm_biquad_cascade_df2T_init_f32( + arm_biquad_cascade_df2T_instance_f32 * S, + uint8_t numStages, + const float32_t * pCoeffs, + float32_t * pState); + + + /** + * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. + * @param[in,out] S points to an instance of the filter data structure. + * @param[in] numStages number of 2nd order stages in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + */ + void arm_biquad_cascade_stereo_df2T_init_f32( + arm_biquad_cascade_stereo_df2T_instance_f32 * S, + uint8_t numStages, + const float32_t * pCoeffs, + float32_t * pState); + + + /** + * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. + * @param[in,out] S points to an instance of the filter data structure. + * @param[in] numStages number of 2nd order stages in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + */ + void arm_biquad_cascade_df2T_init_f64( + arm_biquad_cascade_df2T_instance_f64 * S, + uint8_t numStages, + const float64_t * pCoeffs, + float64_t * pState); + + + /** + * @brief Instance structure for the Q15 FIR lattice filter. + */ + typedef struct + { + uint16_t numStages; /**< number of filter stages. */ + q15_t *pState; /**< points to the state variable array. The array is of length numStages. */ + const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */ + } arm_fir_lattice_instance_q15; + + /** + * @brief Instance structure for the Q31 FIR lattice filter. + */ + typedef struct + { + uint16_t numStages; /**< number of filter stages. */ + q31_t *pState; /**< points to the state variable array. The array is of length numStages. */ + const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */ + } arm_fir_lattice_instance_q31; + + /** + * @brief Instance structure for the floating-point FIR lattice filter. + */ + typedef struct + { + uint16_t numStages; /**< number of filter stages. */ + float32_t *pState; /**< points to the state variable array. The array is of length numStages. */ + const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */ + } arm_fir_lattice_instance_f32; + + + /** + * @brief Initialization function for the Q15 FIR lattice filter. + * @param[in] S points to an instance of the Q15 FIR lattice structure. + * @param[in] numStages number of filter stages. + * @param[in] pCoeffs points to the coefficient buffer. The array is of length numStages. + * @param[in] pState points to the state buffer. The array is of length numStages. 
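+   * \par Example
+   * A minimal sketch; the stage count, block size and reflection coefficients are illustrative assumptions.
+   * <pre>
+   *     #define NUM_STAGES  4
+   *     #define BLOCK_SIZE  32
+   *     static const q15_t refCoeffs[NUM_STAGES] = { 0x2000, 0x1000, 0x0800, 0x0400 };   // placeholder k-coefficients
+   *     static q15_t state[NUM_STAGES];
+   *     static q15_t in[BLOCK_SIZE], out[BLOCK_SIZE];
+   *     arm_fir_lattice_instance_q15 S;
+   *
+   *     arm_fir_lattice_init_q15(&S, NUM_STAGES, refCoeffs, state);
+   *     arm_fir_lattice_q15(&S, in, out, BLOCK_SIZE);
+   * </pre>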
+ */ + void arm_fir_lattice_init_q15( + arm_fir_lattice_instance_q15 * S, + uint16_t numStages, + const q15_t * pCoeffs, + q15_t * pState); + + + /** + * @brief Processing function for the Q15 FIR lattice filter. + * @param[in] S points to an instance of the Q15 FIR lattice structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_lattice_q15( + const arm_fir_lattice_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q31 FIR lattice filter. + * @param[in] S points to an instance of the Q31 FIR lattice structure. + * @param[in] numStages number of filter stages. + * @param[in] pCoeffs points to the coefficient buffer. The array is of length numStages. + * @param[in] pState points to the state buffer. The array is of length numStages. + */ + void arm_fir_lattice_init_q31( + arm_fir_lattice_instance_q31 * S, + uint16_t numStages, + const q31_t * pCoeffs, + q31_t * pState); + + + /** + * @brief Processing function for the Q31 FIR lattice filter. + * @param[in] S points to an instance of the Q31 FIR lattice structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. + */ + void arm_fir_lattice_q31( + const arm_fir_lattice_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + +/** + * @brief Initialization function for the floating-point FIR lattice filter. + * @param[in] S points to an instance of the floating-point FIR lattice structure. + * @param[in] numStages number of filter stages. + * @param[in] pCoeffs points to the coefficient buffer. The array is of length numStages. + * @param[in] pState points to the state buffer. The array is of length numStages. + */ + void arm_fir_lattice_init_f32( + arm_fir_lattice_instance_f32 * S, + uint16_t numStages, + const float32_t * pCoeffs, + float32_t * pState); + + + /** + * @brief Processing function for the floating-point FIR lattice filter. + * @param[in] S points to an instance of the floating-point FIR lattice structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] blockSize number of samples to process. + */ + void arm_fir_lattice_f32( + const arm_fir_lattice_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Instance structure for the Q15 IIR lattice filter. + */ + typedef struct + { + uint16_t numStages; /**< number of stages in the filter. */ + q15_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */ + q15_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */ + q15_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */ + } arm_iir_lattice_instance_q15; + + /** + * @brief Instance structure for the Q31 IIR lattice filter. + */ + typedef struct + { + uint16_t numStages; /**< number of stages in the filter. */ + q31_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */ + q31_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */ + q31_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. 
*/ + } arm_iir_lattice_instance_q31; + + /** + * @brief Instance structure for the floating-point IIR lattice filter. + */ + typedef struct + { + uint16_t numStages; /**< number of stages in the filter. */ + float32_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */ + float32_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */ + float32_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */ + } arm_iir_lattice_instance_f32; + + + /** + * @brief Processing function for the floating-point IIR lattice filter. + * @param[in] S points to an instance of the floating-point IIR lattice structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_iir_lattice_f32( + const arm_iir_lattice_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the floating-point IIR lattice filter. + * @param[in] S points to an instance of the floating-point IIR lattice structure. + * @param[in] numStages number of stages in the filter. + * @param[in] pkCoeffs points to the reflection coefficient buffer. The array is of length numStages. + * @param[in] pvCoeffs points to the ladder coefficient buffer. The array is of length numStages+1. + * @param[in] pState points to the state buffer. The array is of length numStages+blockSize-1. + * @param[in] blockSize number of samples to process. + */ + void arm_iir_lattice_init_f32( + arm_iir_lattice_instance_f32 * S, + uint16_t numStages, + float32_t * pkCoeffs, + float32_t * pvCoeffs, + float32_t * pState, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q31 IIR lattice filter. + * @param[in] S points to an instance of the Q31 IIR lattice structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_iir_lattice_q31( + const arm_iir_lattice_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q31 IIR lattice filter. + * @param[in] S points to an instance of the Q31 IIR lattice structure. + * @param[in] numStages number of stages in the filter. + * @param[in] pkCoeffs points to the reflection coefficient buffer. The array is of length numStages. + * @param[in] pvCoeffs points to the ladder coefficient buffer. The array is of length numStages+1. + * @param[in] pState points to the state buffer. The array is of length numStages+blockSize. + * @param[in] blockSize number of samples to process. + */ + void arm_iir_lattice_init_q31( + arm_iir_lattice_instance_q31 * S, + uint16_t numStages, + q31_t * pkCoeffs, + q31_t * pvCoeffs, + q31_t * pState, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q15 IIR lattice filter. + * @param[in] S points to an instance of the Q15 IIR lattice structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_iir_lattice_q15( + const arm_iir_lattice_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + +/** + * @brief Initialization function for the Q15 IIR lattice filter. 
+ * @param[in] S points to an instance of the fixed-point Q15 IIR lattice structure. + * @param[in] numStages number of stages in the filter. + * @param[in] pkCoeffs points to reflection coefficient buffer. The array is of length numStages. + * @param[in] pvCoeffs points to ladder coefficient buffer. The array is of length numStages+1. + * @param[in] pState points to state buffer. The array is of length numStages+blockSize. + * @param[in] blockSize number of samples to process per call. + */ + void arm_iir_lattice_init_q15( + arm_iir_lattice_instance_q15 * S, + uint16_t numStages, + q15_t * pkCoeffs, + q15_t * pvCoeffs, + q15_t * pState, + uint32_t blockSize); + + + /** + * @brief Instance structure for the floating-point LMS filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + float32_t mu; /**< step size that controls filter coefficient updates. */ + } arm_lms_instance_f32; + + + /** + * @brief Processing function for floating-point LMS filter. + * @param[in] S points to an instance of the floating-point LMS filter structure. + * @param[in] pSrc points to the block of input data. + * @param[in] pRef points to the block of reference data. + * @param[out] pOut points to the block of output data. + * @param[out] pErr points to the block of error data. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_f32( + const arm_lms_instance_f32 * S, + const float32_t * pSrc, + float32_t * pRef, + float32_t * pOut, + float32_t * pErr, + uint32_t blockSize); + + + /** + * @brief Initialization function for floating-point LMS filter. + * @param[in] S points to an instance of the floating-point LMS filter structure. + * @param[in] numTaps number of filter coefficients. + * @param[in] pCoeffs points to the coefficient buffer. + * @param[in] pState points to state buffer. + * @param[in] mu step size that controls filter coefficient updates. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_init_f32( + arm_lms_instance_f32 * S, + uint16_t numTaps, + float32_t * pCoeffs, + float32_t * pState, + float32_t mu, + uint32_t blockSize); + + + /** + * @brief Instance structure for the Q15 LMS filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + q15_t mu; /**< step size that controls filter coefficient updates. */ + uint32_t postShift; /**< bit shift applied to coefficients. */ + } arm_lms_instance_q15; + + + /** + * @brief Initialization function for the Q15 LMS filter. + * @param[in] S points to an instance of the Q15 LMS filter structure. + * @param[in] numTaps number of filter coefficients. + * @param[in] pCoeffs points to the coefficient buffer. + * @param[in] pState points to the state buffer. + * @param[in] mu step size that controls filter coefficient updates. + * @param[in] blockSize number of samples to process. + * @param[in] postShift bit shift applied to coefficients. 
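+   * \par Example
+   * A minimal adaptive-filtering sketch; the tap count, block size, step size and postShift value are
+   * illustrative assumptions (the coefficient array is updated in place as the filter adapts).
+   * <pre>
+   *     #define NUM_TAPS    16
+   *     #define BLOCK_SIZE  64
+   *     static q15_t coeffs[NUM_TAPS];                       // adapted in place, so not const
+   *     static q15_t state[NUM_TAPS + BLOCK_SIZE - 1];
+   *     static q15_t src[BLOCK_SIZE], ref[BLOCK_SIZE];
+   *     static q15_t out[BLOCK_SIZE], err[BLOCK_SIZE];
+   *     arm_lms_instance_q15 S;
+   *
+   *     arm_lms_init_q15(&S, NUM_TAPS, coeffs, state, 0x0200, BLOCK_SIZE, 0);   // mu = 0x0200, postShift = 0
+   *     arm_lms_q15(&S, src, ref, out, err, BLOCK_SIZE);
+   * </pre>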
+ */ + void arm_lms_init_q15( + arm_lms_instance_q15 * S, + uint16_t numTaps, + q15_t * pCoeffs, + q15_t * pState, + q15_t mu, + uint32_t blockSize, + uint32_t postShift); + + + /** + * @brief Processing function for Q15 LMS filter. + * @param[in] S points to an instance of the Q15 LMS filter structure. + * @param[in] pSrc points to the block of input data. + * @param[in] pRef points to the block of reference data. + * @param[out] pOut points to the block of output data. + * @param[out] pErr points to the block of error data. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_q15( + const arm_lms_instance_q15 * S, + const q15_t * pSrc, + q15_t * pRef, + q15_t * pOut, + q15_t * pErr, + uint32_t blockSize); + + + /** + * @brief Instance structure for the Q31 LMS filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + q31_t mu; /**< step size that controls filter coefficient updates. */ + uint32_t postShift; /**< bit shift applied to coefficients. */ + } arm_lms_instance_q31; + + + /** + * @brief Processing function for Q31 LMS filter. + * @param[in] S points to an instance of the Q15 LMS filter structure. + * @param[in] pSrc points to the block of input data. + * @param[in] pRef points to the block of reference data. + * @param[out] pOut points to the block of output data. + * @param[out] pErr points to the block of error data. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_q31( + const arm_lms_instance_q31 * S, + const q31_t * pSrc, + q31_t * pRef, + q31_t * pOut, + q31_t * pErr, + uint32_t blockSize); + + + /** + * @brief Initialization function for Q31 LMS filter. + * @param[in] S points to an instance of the Q31 LMS filter structure. + * @param[in] numTaps number of filter coefficients. + * @param[in] pCoeffs points to coefficient buffer. + * @param[in] pState points to state buffer. + * @param[in] mu step size that controls filter coefficient updates. + * @param[in] blockSize number of samples to process. + * @param[in] postShift bit shift applied to coefficients. + */ + void arm_lms_init_q31( + arm_lms_instance_q31 * S, + uint16_t numTaps, + q31_t * pCoeffs, + q31_t * pState, + q31_t mu, + uint32_t blockSize, + uint32_t postShift); + + + /** + * @brief Instance structure for the floating-point normalized LMS filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + float32_t mu; /**< step size that control filter coefficient updates. */ + float32_t energy; /**< saves previous frame energy. */ + float32_t x0; /**< saves previous input sample. */ + } arm_lms_norm_instance_f32; + + + /** + * @brief Processing function for floating-point normalized LMS filter. + * @param[in] S points to an instance of the floating-point normalized LMS filter structure. + * @param[in] pSrc points to the block of input data. + * @param[in] pRef points to the block of reference data. + * @param[out] pOut points to the block of output data. + * @param[out] pErr points to the block of error data. + * @param[in] blockSize number of samples to process. 
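+   * \par Example
+   * A minimal block-wise adaptation sketch; the tap count, block size and step size are illustrative assumptions.
+   * <pre>
+   *     #define NUM_TAPS    32
+   *     #define BLOCK_SIZE  64
+   *     static float32_t coeffs[NUM_TAPS];                   // adapted in place
+   *     static float32_t state[NUM_TAPS + BLOCK_SIZE - 1];
+   *     static float32_t src[BLOCK_SIZE], ref[BLOCK_SIZE];
+   *     static float32_t out[BLOCK_SIZE], err[BLOCK_SIZE];
+   *     arm_lms_norm_instance_f32 S;
+   *
+   *     arm_lms_norm_init_f32(&S, NUM_TAPS, coeffs, state, 0.01f, BLOCK_SIZE);
+   *     arm_lms_norm_f32(&S, src, ref, out, err, BLOCK_SIZE);   // err[n] = ref[n] - out[n]
+   * </pre>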
+ */ + void arm_lms_norm_f32( + arm_lms_norm_instance_f32 * S, + const float32_t * pSrc, + float32_t * pRef, + float32_t * pOut, + float32_t * pErr, + uint32_t blockSize); + + + /** + * @brief Initialization function for floating-point normalized LMS filter. + * @param[in] S points to an instance of the floating-point LMS filter structure. + * @param[in] numTaps number of filter coefficients. + * @param[in] pCoeffs points to coefficient buffer. + * @param[in] pState points to state buffer. + * @param[in] mu step size that controls filter coefficient updates. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_norm_init_f32( + arm_lms_norm_instance_f32 * S, + uint16_t numTaps, + float32_t * pCoeffs, + float32_t * pState, + float32_t mu, + uint32_t blockSize); + + + /** + * @brief Instance structure for the Q31 normalized LMS filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + q31_t mu; /**< step size that controls filter coefficient updates. */ + uint8_t postShift; /**< bit shift applied to coefficients. */ + const q31_t *recipTable; /**< points to the reciprocal initial value table. */ + q31_t energy; /**< saves previous frame energy. */ + q31_t x0; /**< saves previous input sample. */ + } arm_lms_norm_instance_q31; + + + /** + * @brief Processing function for Q31 normalized LMS filter. + * @param[in] S points to an instance of the Q31 normalized LMS filter structure. + * @param[in] pSrc points to the block of input data. + * @param[in] pRef points to the block of reference data. + * @param[out] pOut points to the block of output data. + * @param[out] pErr points to the block of error data. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_norm_q31( + arm_lms_norm_instance_q31 * S, + const q31_t * pSrc, + q31_t * pRef, + q31_t * pOut, + q31_t * pErr, + uint32_t blockSize); + + + /** + * @brief Initialization function for Q31 normalized LMS filter. + * @param[in] S points to an instance of the Q31 normalized LMS filter structure. + * @param[in] numTaps number of filter coefficients. + * @param[in] pCoeffs points to coefficient buffer. + * @param[in] pState points to state buffer. + * @param[in] mu step size that controls filter coefficient updates. + * @param[in] blockSize number of samples to process. + * @param[in] postShift bit shift applied to coefficients. + */ + void arm_lms_norm_init_q31( + arm_lms_norm_instance_q31 * S, + uint16_t numTaps, + q31_t * pCoeffs, + q31_t * pState, + q31_t mu, + uint32_t blockSize, + uint8_t postShift); + + + /** + * @brief Instance structure for the Q15 normalized LMS filter. + */ + typedef struct + { + uint16_t numTaps; /**< Number of coefficients in the filter. */ + q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + q15_t mu; /**< step size that controls filter coefficient updates. */ + uint8_t postShift; /**< bit shift applied to coefficients. */ + const q15_t *recipTable; /**< Points to the reciprocal initial value table. */ + q15_t energy; /**< saves previous frame energy. */ + q15_t x0; /**< saves previous input sample. 
*/ + } arm_lms_norm_instance_q15; + + + /** + * @brief Processing function for Q15 normalized LMS filter. + * @param[in] S points to an instance of the Q15 normalized LMS filter structure. + * @param[in] pSrc points to the block of input data. + * @param[in] pRef points to the block of reference data. + * @param[out] pOut points to the block of output data. + * @param[out] pErr points to the block of error data. + * @param[in] blockSize number of samples to process. + */ + void arm_lms_norm_q15( + arm_lms_norm_instance_q15 * S, + const q15_t * pSrc, + q15_t * pRef, + q15_t * pOut, + q15_t * pErr, + uint32_t blockSize); + + + /** + * @brief Initialization function for Q15 normalized LMS filter. + * @param[in] S points to an instance of the Q15 normalized LMS filter structure. + * @param[in] numTaps number of filter coefficients. + * @param[in] pCoeffs points to coefficient buffer. + * @param[in] pState points to state buffer. + * @param[in] mu step size that controls filter coefficient updates. + * @param[in] blockSize number of samples to process. + * @param[in] postShift bit shift applied to coefficients. + */ + void arm_lms_norm_init_q15( + arm_lms_norm_instance_q15 * S, + uint16_t numTaps, + q15_t * pCoeffs, + q15_t * pState, + q15_t mu, + uint32_t blockSize, + uint8_t postShift); + + + /** + * @brief Correlation of floating-point sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + */ + void arm_correlate_f32( + const float32_t * pSrcA, + uint32_t srcALen, + const float32_t * pSrcB, + uint32_t srcBLen, + float32_t * pDst); + + +/** + @brief Correlation of Q15 sequences + @param[in] pSrcA points to the first input sequence + @param[in] srcALen length of the first input sequence + @param[in] pSrcB points to the second input sequence + @param[in] srcBLen length of the second input sequence + @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + @param[in] pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. +*/ +void arm_correlate_opt_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + q15_t * pScratch); + + +/** + @brief Correlation of Q15 sequences. + @param[in] pSrcA points to the first input sequence + @param[in] srcALen length of the first input sequence + @param[in] pSrcB points to the second input sequence + @param[in] srcBLen length of the second input sequence + @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + */ + void arm_correlate_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst); + + +/** + @brief Correlation of Q15 sequences (fast version). + @param[in] pSrcA points to the first input sequence + @param[in] srcALen length of the first input sequence + @param[in] pSrcB points to the second input sequence + @param[in] srcBLen length of the second input sequence + @param[out] pDst points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1. 
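+  \par Example
+           A minimal sketch; the sequence lengths are illustrative assumptions, and pDst is sized to
+           2 * max(srcALen, srcBLen) - 1 as noted above.
+  <pre>
+      #define SRC_A_LEN  64
+      #define SRC_B_LEN  16
+      static q15_t srcA[SRC_A_LEN], srcB[SRC_B_LEN];
+      static q15_t dst[2 * SRC_A_LEN - 1];
+
+      arm_correlate_fast_q15(srcA, SRC_A_LEN, srcB, SRC_B_LEN, dst);
+  </pre>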
+ @return none + */ +void arm_correlate_fast_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst); + + +/** + @brief Correlation of Q15 sequences (fast version). + @param[in] pSrcA points to the first input sequence. + @param[in] srcALen length of the first input sequence. + @param[in] pSrcB points to the second input sequence. + @param[in] srcBLen length of the second input sequence. + @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + @param[in] pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + */ +void arm_correlate_fast_opt_q15( + const q15_t * pSrcA, + uint32_t srcALen, + const q15_t * pSrcB, + uint32_t srcBLen, + q15_t * pDst, + q15_t * pScratch); + + + /** + * @brief Correlation of Q31 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + */ + void arm_correlate_q31( + const q31_t * pSrcA, + uint32_t srcALen, + const q31_t * pSrcB, + uint32_t srcBLen, + q31_t * pDst); + + +/** + @brief Correlation of Q31 sequences (fast version). + @param[in] pSrcA points to the first input sequence + @param[in] srcALen length of the first input sequence + @param[in] pSrcB points to the second input sequence + @param[in] srcBLen length of the second input sequence + @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + */ +void arm_correlate_fast_q31( + const q31_t * pSrcA, + uint32_t srcALen, + const q31_t * pSrcB, + uint32_t srcBLen, + q31_t * pDst); + + + /** + * @brief Correlation of Q7 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + * @param[in] pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2. + * @param[in] pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen). + */ + void arm_correlate_opt_q7( + const q7_t * pSrcA, + uint32_t srcALen, + const q7_t * pSrcB, + uint32_t srcBLen, + q7_t * pDst, + q15_t * pScratch1, + q15_t * pScratch2); + + + /** + * @brief Correlation of Q7 sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. + */ + void arm_correlate_q7( + const q7_t * pSrcA, + uint32_t srcALen, + const q7_t * pSrcB, + uint32_t srcBLen, + q7_t * pDst); + + + /** + * @brief Instance structure for the floating-point sparse FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ + float32_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. 
*/ + const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ + int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ + } arm_fir_sparse_instance_f32; + + /** + * @brief Instance structure for the Q31 sparse FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ + q31_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */ + const q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ + int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ + } arm_fir_sparse_instance_q31; + + /** + * @brief Instance structure for the Q15 sparse FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ + q15_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */ + const q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ + int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ + } arm_fir_sparse_instance_q15; + + /** + * @brief Instance structure for the Q7 sparse FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of coefficients in the filter. */ + uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */ + q7_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */ + const q7_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/ + uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */ + int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */ + } arm_fir_sparse_instance_q7; + + + /** + * @brief Processing function for the floating-point sparse FIR filter. + * @param[in] S points to an instance of the floating-point sparse FIR structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] pScratchIn points to a temporary buffer of size blockSize. + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_sparse_f32( + arm_fir_sparse_instance_f32 * S, + const float32_t * pSrc, + float32_t * pDst, + float32_t * pScratchIn, + uint32_t blockSize); + + + /** + * @brief Initialization function for the floating-point sparse FIR filter. + * @param[in,out] S points to an instance of the floating-point sparse FIR structure. + * @param[in] numTaps number of nonzero coefficients in the filter. + * @param[in] pCoeffs points to the array of filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] pTapDelay points to the array of offset times. + * @param[in] maxDelay maximum offset time supported. + * @param[in] blockSize number of samples that will be processed per block. 
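+   * \par Example
+   * A minimal sketch for a three-tap sparse (echo-like) filter; the tap gains, delays, block size and the
+   * extra word of state headroom are illustrative assumptions.
+   * <pre>
+   *     #define NUM_TAPS    3
+   *     #define BLOCK_SIZE  64
+   *     #define MAX_DELAY   40
+   *     static const float32_t coeffs[NUM_TAPS] = { 1.0f, 0.5f, 0.25f };
+   *     static int32_t tapDelay[NUM_TAPS]       = { 0, 10, 40 };          // offsets, none exceeding MAX_DELAY
+   *     static float32_t state[MAX_DELAY + BLOCK_SIZE];                   // at least maxDelay + blockSize - 1 samples
+   *     static float32_t in[BLOCK_SIZE], out[BLOCK_SIZE], scratch[BLOCK_SIZE];
+   *     arm_fir_sparse_instance_f32 S;
+   *
+   *     arm_fir_sparse_init_f32(&S, NUM_TAPS, coeffs, state, tapDelay, MAX_DELAY, BLOCK_SIZE);
+   *     arm_fir_sparse_f32(&S, in, out, scratch, BLOCK_SIZE);
+   * </pre>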
+ */ + void arm_fir_sparse_init_f32( + arm_fir_sparse_instance_f32 * S, + uint16_t numTaps, + const float32_t * pCoeffs, + float32_t * pState, + int32_t * pTapDelay, + uint16_t maxDelay, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q31 sparse FIR filter. + * @param[in] S points to an instance of the Q31 sparse FIR structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] pScratchIn points to a temporary buffer of size blockSize. + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_sparse_q31( + arm_fir_sparse_instance_q31 * S, + const q31_t * pSrc, + q31_t * pDst, + q31_t * pScratchIn, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q31 sparse FIR filter. + * @param[in,out] S points to an instance of the Q31 sparse FIR structure. + * @param[in] numTaps number of nonzero coefficients in the filter. + * @param[in] pCoeffs points to the array of filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] pTapDelay points to the array of offset times. + * @param[in] maxDelay maximum offset time supported. + * @param[in] blockSize number of samples that will be processed per block. + */ + void arm_fir_sparse_init_q31( + arm_fir_sparse_instance_q31 * S, + uint16_t numTaps, + const q31_t * pCoeffs, + q31_t * pState, + int32_t * pTapDelay, + uint16_t maxDelay, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q15 sparse FIR filter. + * @param[in] S points to an instance of the Q15 sparse FIR structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] pScratchIn points to a temporary buffer of size blockSize. + * @param[in] pScratchOut points to a temporary buffer of size blockSize. + * @param[in] blockSize number of input samples to process per call. + */ + void arm_fir_sparse_q15( + arm_fir_sparse_instance_q15 * S, + const q15_t * pSrc, + q15_t * pDst, + q15_t * pScratchIn, + q31_t * pScratchOut, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q15 sparse FIR filter. + * @param[in,out] S points to an instance of the Q15 sparse FIR structure. + * @param[in] numTaps number of nonzero coefficients in the filter. + * @param[in] pCoeffs points to the array of filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] pTapDelay points to the array of offset times. + * @param[in] maxDelay maximum offset time supported. + * @param[in] blockSize number of samples that will be processed per block. + */ + void arm_fir_sparse_init_q15( + arm_fir_sparse_instance_q15 * S, + uint16_t numTaps, + const q15_t * pCoeffs, + q15_t * pState, + int32_t * pTapDelay, + uint16_t maxDelay, + uint32_t blockSize); + + + /** + * @brief Processing function for the Q7 sparse FIR filter. + * @param[in] S points to an instance of the Q7 sparse FIR structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data + * @param[in] pScratchIn points to a temporary buffer of size blockSize. + * @param[in] pScratchOut points to a temporary buffer of size blockSize. + * @param[in] blockSize number of input samples to process per call. 
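+   * \par Example
+   * A minimal sketch; note that this variant needs a q7_t scratch input buffer and a q31_t scratch output
+   * buffer, each of blockSize entries. All sizes and values below are illustrative assumptions.
+   * <pre>
+   *     #define NUM_TAPS    3
+   *     #define BLOCK_SIZE  64
+   *     #define MAX_DELAY   16
+   *     static const q7_t coeffs[NUM_TAPS] = { 0x40, 0x20, 0x10 };
+   *     static int32_t tapDelay[NUM_TAPS]  = { 0, 5, 16 };
+   *     static q7_t  state[MAX_DELAY + BLOCK_SIZE];
+   *     static q7_t  in[BLOCK_SIZE], out[BLOCK_SIZE], scratchIn[BLOCK_SIZE];
+   *     static q31_t scratchOut[BLOCK_SIZE];
+   *     arm_fir_sparse_instance_q7 S;
+   *
+   *     arm_fir_sparse_init_q7(&S, NUM_TAPS, coeffs, state, tapDelay, MAX_DELAY, BLOCK_SIZE);
+   *     arm_fir_sparse_q7(&S, in, out, scratchIn, scratchOut, BLOCK_SIZE);
+   * </pre>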
+ */ + void arm_fir_sparse_q7( + arm_fir_sparse_instance_q7 * S, + const q7_t * pSrc, + q7_t * pDst, + q7_t * pScratchIn, + q31_t * pScratchOut, + uint32_t blockSize); + + + /** + * @brief Initialization function for the Q7 sparse FIR filter. + * @param[in,out] S points to an instance of the Q7 sparse FIR structure. + * @param[in] numTaps number of nonzero coefficients in the filter. + * @param[in] pCoeffs points to the array of filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] pTapDelay points to the array of offset times. + * @param[in] maxDelay maximum offset time supported. + * @param[in] blockSize number of samples that will be processed per block. + */ + void arm_fir_sparse_init_q7( + arm_fir_sparse_instance_q7 * S, + uint16_t numTaps, + const q7_t * pCoeffs, + q7_t * pState, + int32_t * pTapDelay, + uint16_t maxDelay, + uint32_t blockSize); + + + /** + * @brief Floating-point sin_cos function. + * @param[in] theta input value in degrees + * @param[out] pSinVal points to the processed sine output. + * @param[out] pCosVal points to the processed cos output. + */ + void arm_sin_cos_f32( + float32_t theta, + float32_t * pSinVal, + float32_t * pCosVal); + + + /** + * @brief Q31 sin_cos function. + * @param[in] theta scaled input value in degrees + * @param[out] pSinVal points to the processed sine output. + * @param[out] pCosVal points to the processed cosine output. + */ + void arm_sin_cos_q31( + q31_t theta, + q31_t * pSinVal, + q31_t * pCosVal); + + + /** + * @brief Floating-point complex conjugate. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ + void arm_cmplx_conj_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t numSamples); + + /** + * @brief Q31 complex conjugate. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ + void arm_cmplx_conj_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q15 complex conjugate. 
+ * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ + void arm_cmplx_conj_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t numSamples); + + + /** + * @brief Floating-point complex magnitude squared + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_squared_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q31 complex magnitude squared + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_squared_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q15 complex magnitude squared + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_squared_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t numSamples); + + + /** + * @ingroup groupController + */ + + /** + * @defgroup PID PID Motor Control + * + * A Proportional Integral Derivative (PID) controller is a generic feedback control + * loop mechanism widely used in industrial control systems. + * A PID controller is the most commonly used type of feedback controller. + * + * This set of functions implements (PID) controllers + * for Q15, Q31, and floating-point data types. The functions operate on a single sample + * of data and each call to the function returns a single processed value. + * S points to an instance of the PID control data structure. in + * is the input sample value. The functions return the output value. + * + * \par Algorithm: + *
+   * <pre>
+   *    y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2]
+   *    A0 = Kp + Ki + Kd
+   *    A1 = (-Kp ) - (2 * Kd )
+   *    A2 = Kd
+   * </pre>
+ * + * \par + * where \c Kp is proportional constant, \c Ki is Integral constant and \c Kd is Derivative constant + * + * \par + * \image html PID.gif "Proportional Integral Derivative Controller" + * + * \par + * The PID controller calculates an "error" value as the difference between + * the measured output and the reference input. + * The controller attempts to minimize the error by adjusting the process control inputs. + * The proportional value determines the reaction to the current error, + * the integral value determines the reaction based on the sum of recent errors, + * and the derivative value determines the reaction based on the rate at which the error has been changing. + * + * \par Instance Structure + * The Gains A0, A1, A2 and state variables for a PID controller are stored together in an instance data structure. + * A separate instance structure must be defined for each PID Controller. + * There are separate instance structure declarations for each of the 3 supported data types. + * + * \par Reset Functions + * There is also an associated reset function for each data type which clears the state array. + * + * \par Initialization Functions + * There is also an associated initialization function for each data type. + * The initialization function performs the following operations: + * - Initializes the Gains A0, A1, A2 from Kp,Ki, Kd gains. + * - Zeros out the values in the state buffer. + * + * \par + * Instance structure cannot be placed into a const data section and it is recommended to use the initialization function. + * + * \par Fixed-Point Behavior + * Care must be taken when using the fixed-point versions of the PID Controller functions. + * In particular, the overflow and saturation behavior of the accumulator used in each function must be considered. + * Refer to the function specific documentation below for usage guidelines. + */ + + /** + * @addtogroup PID + * @{ + */ + + /** + * @brief Process function for the floating-point PID Control. + * @param[in,out] S is an instance of the floating-point PID Control structure + * @param[in] in input sample to process + * @return processed output sample. + */ + __STATIC_FORCEINLINE float32_t arm_pid_f32( + arm_pid_instance_f32 * S, + float32_t in) + { + float32_t out; + + /* y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2] */ + out = (S->A0 * in) + + (S->A1 * S->state[0]) + (S->A2 * S->state[1]) + (S->state[2]); + + /* Update state */ + S->state[1] = S->state[0]; + S->state[0] = in; + S->state[2] = out; + + /* return to application */ + return (out); + + } + +/** + @brief Process function for the Q31 PID Control. + @param[in,out] S points to an instance of the Q31 PID Control structure + @param[in] in input sample to process + @return processed output sample. + + \par Scaling and Overflow Behavior + The function is implemented using an internal 64-bit accumulator. + The accumulator has a 2.62 format and maintains full precision of the intermediate multiplication results but provides only a single guard bit. + Thus, if the accumulator result overflows it wraps around rather than clip. + In order to avoid overflows completely the input signal must be scaled down by 2 bits as there are four additions. + After all multiply-accumulates are performed, the 2.62 accumulator is truncated to 1.32 format and then saturated to 1.31 format. 
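+
+  \par Example
+           A minimal per-sample control-loop sketch; the gains and samples below are illustrative
+           assumptions, and arm_pid_init_q31() is the companion initialization function described in the
+           PID group notes above (it derives A0, A1, A2 from Kp, Ki, Kd and clears the state).
+  <pre>
+      arm_pid_instance_q31 S;
+      q31_t target = 0x20000000, measured = 0x1E000000;   // assumed Q31 set-point and feedback samples
+
+      S.Kp = 0x0CCCCCCC;                                   // proportional gain
+      S.Ki = 0x00A3D70A;                                   // integral gain
+      S.Kd = 0x00000000;                                   // derivative gain
+      arm_pid_init_q31(&S, 1);                             // 1 = reset internal state
+
+      q31_t out = arm_pid_q31(&S, target - measured);      // one controller update per error sample
+  </pre>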
+ */ +__STATIC_FORCEINLINE q31_t arm_pid_q31( + arm_pid_instance_q31 * S, + q31_t in) + { + q63_t acc; + q31_t out; + + /* acc = A0 * x[n] */ + acc = (q63_t) S->A0 * in; + + /* acc += A1 * x[n-1] */ + acc += (q63_t) S->A1 * S->state[0]; + + /* acc += A2 * x[n-2] */ + acc += (q63_t) S->A2 * S->state[1]; + + /* convert output to 1.31 format to add y[n-1] */ + out = (q31_t) (acc >> 31U); + + /* out += y[n-1] */ + out += S->state[2]; + + /* Update state */ + S->state[1] = S->state[0]; + S->state[0] = in; + S->state[2] = out; + + /* return to application */ + return (out); + } + + +/** + @brief Process function for the Q15 PID Control. + @param[in,out] S points to an instance of the Q15 PID Control structure + @param[in] in input sample to process + @return processed output sample. + + \par Scaling and Overflow Behavior + The function is implemented using a 64-bit internal accumulator. + Both Gains and state variables are represented in 1.15 format and multiplications yield a 2.30 result. + The 2.30 intermediate results are accumulated in a 64-bit accumulator in 34.30 format. + There is no risk of internal overflow with this approach and the full precision of intermediate multiplications is preserved. + After all additions have been performed, the accumulator is truncated to 34.15 format by discarding low 15 bits. + Lastly, the accumulator is saturated to yield a result in 1.15 format. + */ +__STATIC_FORCEINLINE q15_t arm_pid_q15( + arm_pid_instance_q15 * S, + q15_t in) + { + q63_t acc; + q15_t out; + +#if defined (ARM_MATH_DSP) + /* Implementation of PID controller */ + + /* acc = A0 * x[n] */ + acc = (q31_t) __SMUAD((uint32_t)S->A0, (uint32_t)in); + + /* acc += A1 * x[n-1] + A2 * x[n-2] */ + acc = (q63_t)__SMLALD((uint32_t)S->A1, (uint32_t)read_q15x2 (S->state), (uint64_t)acc); +#else + /* acc = A0 * x[n] */ + acc = ((q31_t) S->A0) * in; + + /* acc += A1 * x[n-1] + A2 * x[n-2] */ + acc += (q31_t) S->A1 * S->state[0]; + acc += (q31_t) S->A2 * S->state[1]; +#endif + + /* acc += y[n-1] */ + acc += (q31_t) S->state[2] << 15; + + /* saturate the output */ + out = (q15_t) (__SSAT((q31_t)(acc >> 15), 16)); + + /* Update state */ + S->state[1] = S->state[0]; + S->state[0] = in; + S->state[2] = out; + + /* return to application */ + return (out); + } + + /** + * @} end of PID group + */ + + + /** + * @brief Floating-point matrix inverse. + * @param[in] src points to the instance of the input floating-point matrix structure. + * @param[out] dst points to the instance of the output floating-point matrix structure. + * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match. + * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR. + */ + arm_status arm_mat_inverse_f32( + const arm_matrix_instance_f32 * src, + arm_matrix_instance_f32 * dst); + + + /** + * @brief Floating-point matrix inverse. + * @param[in] src points to the instance of the input floating-point matrix structure. + * @param[out] dst points to the instance of the output floating-point matrix structure. + * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match. + * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR. 
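+   *
+   * \par Example
+   * A minimal sketch inverting a 2x2 matrix; the data values are illustrative, and the instance fields are
+   * assumed to follow the usual numRows / numCols / pData layout of arm_matrix_instance_f64.
+   * <pre>
+   *     static float64_t srcData[2 * 2] = { 4.0, 7.0,
+   *                                         2.0, 6.0 };    // non-singular (determinant = 10)
+   *     static float64_t dstData[2 * 2];
+   *     arm_matrix_instance_f64 src = { 2, 2, srcData };
+   *     arm_matrix_instance_f64 dst = { 2, 2, dstData };
+   *
+   *     arm_status st = arm_mat_inverse_f64(&src, &dst);   // ARM_MATH_SINGULAR if no inverse exists
+   * </pre>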
+ */ + arm_status arm_mat_inverse_f64( + const arm_matrix_instance_f64 * src, + arm_matrix_instance_f64 * dst); + + + + /** + * @ingroup groupController + */ + + /** + * @defgroup clarke Vector Clarke Transform + * Forward Clarke transform converts the instantaneous stator phases into a two-coordinate time invariant vector. + * Generally the Clarke transform uses three-phase currents Ia, Ib and Ic to calculate currents + * in the two-phase orthogonal stator axis Ialpha and Ibeta. + * When Ialpha is superposed with Ia as shown in the figure below + * \image html clarke.gif Stator current space vector and its components in (a,b). + * and Ia + Ib + Ic = 0, in this condition Ialpha and Ibeta + * can be calculated using only Ia and Ib. + * + * The function operates on a single sample of data and each call to the function returns the processed output. + * The library provides separate functions for Q31 and floating-point data types. + * \par Algorithm + * \image html clarkeFormula.gif + * where Ia and Ib are the instantaneous stator phases and + * pIalpha and pIbeta are the two coordinates of time invariant vector. + * \par Fixed-Point Behavior + * Care must be taken when using the Q31 version of the Clarke transform. + * In particular, the overflow and saturation behavior of the accumulator used must be considered. + * Refer to the function specific documentation below for usage guidelines. + */ + + /** + * @addtogroup clarke + * @{ + */ + + /** + * + * @brief Floating-point Clarke transform + * @param[in] Ia input three-phase coordinate a + * @param[in] Ib input three-phase coordinate b + * @param[out] pIalpha points to output two-phase orthogonal vector axis alpha + * @param[out] pIbeta points to output two-phase orthogonal vector axis beta + * @return none + */ + __STATIC_FORCEINLINE void arm_clarke_f32( + float32_t Ia, + float32_t Ib, + float32_t * pIalpha, + float32_t * pIbeta) + { + /* Calculate pIalpha using the equation, pIalpha = Ia */ + *pIalpha = Ia; + + /* Calculate pIbeta using the equation, pIbeta = (1/sqrt(3)) * Ia + (2/sqrt(3)) * Ib */ + *pIbeta = ((float32_t) 0.57735026919 * Ia + (float32_t) 1.15470053838 * Ib); + } + + +/** + @brief Clarke transform for Q31 version + @param[in] Ia input three-phase coordinate a + @param[in] Ib input three-phase coordinate b + @param[out] pIalpha points to output two-phase orthogonal vector axis alpha + @param[out] pIbeta points to output two-phase orthogonal vector axis beta + @return none + + \par Scaling and Overflow Behavior + The function is implemented using an internal 32-bit accumulator. + The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. + There is saturation on the addition, hence there is no risk of overflow. 
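+
+  \par Example
+         Illustrative sketch added for this example; the phase-current values below are
+         arbitrary Q31 placeholders.
+  <pre>
+      q31_t ia = 0x0CCCCCCC;               // phase a current
+      q31_t ib = 0x06666666;               // phase b current
+      q31_t ialpha, ibeta;
+      arm_clarke_q31(ia, ib, &ialpha, &ibeta);
+  </pre>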
+ */ +__STATIC_FORCEINLINE void arm_clarke_q31( + q31_t Ia, + q31_t Ib, + q31_t * pIalpha, + q31_t * pIbeta) + { + q31_t product1, product2; /* Temporary variables used to store intermediate results */ + + /* Calculating pIalpha from Ia by equation pIalpha = Ia */ + *pIalpha = Ia; + + /* Intermediate product is calculated by (1/(sqrt(3)) * Ia) */ + product1 = (q31_t) (((q63_t) Ia * 0x24F34E8B) >> 30); + + /* Intermediate product is calculated by (2/sqrt(3) * Ib) */ + product2 = (q31_t) (((q63_t) Ib * 0x49E69D16) >> 30); + + /* pIbeta is calculated by adding the intermediate products */ + *pIbeta = __QADD(product1, product2); + } + + /** + * @} end of clarke group + */ + + + /** + * @ingroup groupController + */ + + /** + * @defgroup inv_clarke Vector Inverse Clarke Transform + * Inverse Clarke transform converts the two-coordinate time invariant vector into instantaneous stator phases. + * + * The function operates on a single sample of data and each call to the function returns the processed output. + * The library provides separate functions for Q31 and floating-point data types. + * \par Algorithm + * \image html clarkeInvFormula.gif + * where pIa and pIb are the instantaneous stator phases and + * Ialpha and Ibeta are the two coordinates of time invariant vector. + * \par Fixed-Point Behavior + * Care must be taken when using the Q31 version of the Clarke transform. + * In particular, the overflow and saturation behavior of the accumulator used must be considered. + * Refer to the function specific documentation below for usage guidelines. + */ + + /** + * @addtogroup inv_clarke + * @{ + */ + + /** + * @brief Floating-point Inverse Clarke transform + * @param[in] Ialpha input two-phase orthogonal vector axis alpha + * @param[in] Ibeta input two-phase orthogonal vector axis beta + * @param[out] pIa points to output three-phase coordinate a + * @param[out] pIb points to output three-phase coordinate b + * @return none + */ + __STATIC_FORCEINLINE void arm_inv_clarke_f32( + float32_t Ialpha, + float32_t Ibeta, + float32_t * pIa, + float32_t * pIb) + { + /* Calculating pIa from Ialpha by equation pIa = Ialpha */ + *pIa = Ialpha; + + /* Calculating pIb from Ialpha and Ibeta by equation pIb = -(1/2) * Ialpha + (sqrt(3)/2) * Ibeta */ + *pIb = -0.5f * Ialpha + 0.8660254039f * Ibeta; + } + + +/** + @brief Inverse Clarke transform for Q31 version + @param[in] Ialpha input two-phase orthogonal vector axis alpha + @param[in] Ibeta input two-phase orthogonal vector axis beta + @param[out] pIa points to output three-phase coordinate a + @param[out] pIb points to output three-phase coordinate b + @return none + + \par Scaling and Overflow Behavior + The function is implemented using an internal 32-bit accumulator. + The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. + There is saturation on the subtraction, hence there is no risk of overflow. 
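+
+  \par Example
+         Illustrative sketch added for this example; the inputs are arbitrary Q31
+         placeholders, typically the outputs of the forward Clarke transform.
+  <pre>
+      q31_t ialpha = 0x0CCCCCCC;
+      q31_t ibeta  = 0x0E666666;
+      q31_t ia, ib;
+      arm_inv_clarke_q31(ialpha, ibeta, &ia, &ib);
+  </pre>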
+ */
+__STATIC_FORCEINLINE void arm_inv_clarke_q31(
+  q31_t  Ialpha,
+  q31_t  Ibeta,
+  q31_t * pIa,
+  q31_t * pIb)
+  {
+    q31_t product1, product2;                    /* Temporary variables used to store intermediate results */
+
+    /* Calculating pIa from Ialpha by equation pIa = Ialpha */
+    *pIa = Ialpha;
+
+    /* Intermediate product is calculated by (1/2 * Ialpha) */
+    product1 = (q31_t) (((q63_t) (Ialpha) * (0x40000000)) >> 31);
+
+    /* Intermediate product is calculated by (sqrt(3)/2 * Ibeta) */
+    product2 = (q31_t) (((q63_t) (Ibeta) * (0x6ED9EBA1)) >> 31);
+
+    /* pIb is calculated by subtracting product1 from product2 */
+    *pIb = __QSUB(product2, product1);
+  }
+
+  /**
+   * @} end of inv_clarke group
+   */
+
+
+
+  /**
+   * @ingroup groupController
+   */
+
+  /**
+   * @defgroup park Vector Park Transform
+   *
+   * Forward Park transform converts the input two-coordinate vector to flux and torque components.
+   * The Park transform can be used to realize the transformation of the Ialpha and the Ibeta currents
+   * from the stationary to the moving reference frame and control the spatial relationship between
+   * the stator vector current and rotor flux vector.
+   * If we consider the d axis aligned with the rotor flux, the diagram below shows the
+   * current vector and the relationship from the two reference frames:
+   * \image html park.gif "Stator current space vector and its component in (a,b) and in the d,q rotating reference frame"
+   *
+   * The function operates on a single sample of data and each call to the function returns the processed output.
+   * The library provides separate functions for Q31 and floating-point data types.
+   * \par Algorithm
+   * \image html parkFormula.gif
+   * where Ialpha and Ibeta are the stator vector components,
+   * pId and pIq are rotor vector components and cosVal and sinVal are the
+   * cosine and sine values of theta (rotor flux position).
+   * \par Fixed-Point Behavior
+   * Care must be taken when using the Q31 version of the Park transform.
+   * In particular, the overflow and saturation behavior of the accumulator used must be considered.
+   * Refer to the function specific documentation below for usage guidelines.
+   */
+
+  /**
+   * @addtogroup park
+   * @{
+   */
+
+  /**
+   * @brief Floating-point Park transform
+   * @param[in]  Ialpha  input two-phase vector coordinate alpha
+   * @param[in]  Ibeta   input two-phase vector coordinate beta
+   * @param[out] pId     points to output rotor reference frame d
+   * @param[out] pIq     points to output rotor reference frame q
+   * @param[in]  sinVal  sine value of rotation angle theta
+   * @param[in]  cosVal  cosine value of rotation angle theta
+   * @return     none
+   *
+   * The function implements the forward Park transform.
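+   *
+   * \par Example
+   * Illustrative sketch added for this example. The rotor angle and the alpha/beta
+   * inputs are placeholders; arm_sin_f32()/arm_cos_f32() are the fast trigonometric
+   * approximations declared later in this header.
+   * <pre>
+   *     float32_t iAlpha = 0.3f, iBeta = 0.4f;     // outputs of the Clarke transform
+   *     float32_t theta  = 0.5f;                   // rotor flux position in radians
+   *     float32_t sinVal = arm_sin_f32(theta);
+   *     float32_t cosVal = arm_cos_f32(theta);
+   *     float32_t id, iq;
+   *     arm_park_f32(iAlpha, iBeta, &id, &iq, sinVal, cosVal);
+   * </pre>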
+ * + */ + __STATIC_FORCEINLINE void arm_park_f32( + float32_t Ialpha, + float32_t Ibeta, + float32_t * pId, + float32_t * pIq, + float32_t sinVal, + float32_t cosVal) + { + /* Calculate pId using the equation, pId = Ialpha * cosVal + Ibeta * sinVal */ + *pId = Ialpha * cosVal + Ibeta * sinVal; + + /* Calculate pIq using the equation, pIq = - Ialpha * sinVal + Ibeta * cosVal */ + *pIq = -Ialpha * sinVal + Ibeta * cosVal; + } + + +/** + @brief Park transform for Q31 version + @param[in] Ialpha input two-phase vector coordinate alpha + @param[in] Ibeta input two-phase vector coordinate beta + @param[out] pId points to output rotor reference frame d + @param[out] pIq points to output rotor reference frame q + @param[in] sinVal sine value of rotation angle theta + @param[in] cosVal cosine value of rotation angle theta + @return none + + \par Scaling and Overflow Behavior + The function is implemented using an internal 32-bit accumulator. + The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. + There is saturation on the addition and subtraction, hence there is no risk of overflow. + */ +__STATIC_FORCEINLINE void arm_park_q31( + q31_t Ialpha, + q31_t Ibeta, + q31_t * pId, + q31_t * pIq, + q31_t sinVal, + q31_t cosVal) + { + q31_t product1, product2; /* Temporary variables used to store intermediate results */ + q31_t product3, product4; /* Temporary variables used to store intermediate results */ + + /* Intermediate product is calculated by (Ialpha * cosVal) */ + product1 = (q31_t) (((q63_t) (Ialpha) * (cosVal)) >> 31); + + /* Intermediate product is calculated by (Ibeta * sinVal) */ + product2 = (q31_t) (((q63_t) (Ibeta) * (sinVal)) >> 31); + + + /* Intermediate product is calculated by (Ialpha * sinVal) */ + product3 = (q31_t) (((q63_t) (Ialpha) * (sinVal)) >> 31); + + /* Intermediate product is calculated by (Ibeta * cosVal) */ + product4 = (q31_t) (((q63_t) (Ibeta) * (cosVal)) >> 31); + + /* Calculate pId by adding the two intermediate products 1 and 2 */ + *pId = __QADD(product1, product2); + + /* Calculate pIq by subtracting the two intermediate products 3 from 4 */ + *pIq = __QSUB(product4, product3); + } + + /** + * @} end of park group + */ + + + /** + * @ingroup groupController + */ + + /** + * @defgroup inv_park Vector Inverse Park transform + * Inverse Park transform converts the input flux and torque components to two-coordinate vector. + * + * The function operates on a single sample of data and each call to the function returns the processed output. + * The library provides separate functions for Q31 and floating-point data types. + * \par Algorithm + * \image html parkInvFormula.gif + * where pIalpha and pIbeta are the stator vector components, + * Id and Iq are rotor vector components and cosVal and sinVal are the + * cosine and sine values of theta (rotor flux position). + * \par Fixed-Point Behavior + * Care must be taken when using the Q31 version of the Park transform. + * In particular, the overflow and saturation behavior of the accumulator used must be considered. + * Refer to the function specific documentation below for usage guidelines. 
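+   *
+   * \par Example
+   * Illustrative sketch added for this example; the d/q inputs and the rotor angle are
+   * placeholders, and arm_sin_f32()/arm_cos_f32() are the fast trigonometric
+   * approximations declared later in this header.
+   * <pre>
+   *     float32_t id = 0.2f, iq = 0.1f;            // rotor reference frame currents
+   *     float32_t theta = 0.5f;                    // rotor flux position in radians
+   *     float32_t ialpha, ibeta;
+   *     arm_inv_park_f32(id, iq, &ialpha, &ibeta, arm_sin_f32(theta), arm_cos_f32(theta));
+   * </pre>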
+ */ + + /** + * @addtogroup inv_park + * @{ + */ + + /** + * @brief Floating-point Inverse Park transform + * @param[in] Id input coordinate of rotor reference frame d + * @param[in] Iq input coordinate of rotor reference frame q + * @param[out] pIalpha points to output two-phase orthogonal vector axis alpha + * @param[out] pIbeta points to output two-phase orthogonal vector axis beta + * @param[in] sinVal sine value of rotation angle theta + * @param[in] cosVal cosine value of rotation angle theta + * @return none + */ + __STATIC_FORCEINLINE void arm_inv_park_f32( + float32_t Id, + float32_t Iq, + float32_t * pIalpha, + float32_t * pIbeta, + float32_t sinVal, + float32_t cosVal) + { + /* Calculate pIalpha using the equation, pIalpha = Id * cosVal - Iq * sinVal */ + *pIalpha = Id * cosVal - Iq * sinVal; + + /* Calculate pIbeta using the equation, pIbeta = Id * sinVal + Iq * cosVal */ + *pIbeta = Id * sinVal + Iq * cosVal; + } + + +/** + @brief Inverse Park transform for Q31 version + @param[in] Id input coordinate of rotor reference frame d + @param[in] Iq input coordinate of rotor reference frame q + @param[out] pIalpha points to output two-phase orthogonal vector axis alpha + @param[out] pIbeta points to output two-phase orthogonal vector axis beta + @param[in] sinVal sine value of rotation angle theta + @param[in] cosVal cosine value of rotation angle theta + @return none + + @par Scaling and Overflow Behavior + The function is implemented using an internal 32-bit accumulator. + The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format. + There is saturation on the addition, hence there is no risk of overflow. + */ +__STATIC_FORCEINLINE void arm_inv_park_q31( + q31_t Id, + q31_t Iq, + q31_t * pIalpha, + q31_t * pIbeta, + q31_t sinVal, + q31_t cosVal) + { + q31_t product1, product2; /* Temporary variables used to store intermediate results */ + q31_t product3, product4; /* Temporary variables used to store intermediate results */ + + /* Intermediate product is calculated by (Id * cosVal) */ + product1 = (q31_t) (((q63_t) (Id) * (cosVal)) >> 31); + + /* Intermediate product is calculated by (Iq * sinVal) */ + product2 = (q31_t) (((q63_t) (Iq) * (sinVal)) >> 31); + + + /* Intermediate product is calculated by (Id * sinVal) */ + product3 = (q31_t) (((q63_t) (Id) * (sinVal)) >> 31); + + /* Intermediate product is calculated by (Iq * cosVal) */ + product4 = (q31_t) (((q63_t) (Iq) * (cosVal)) >> 31); + + /* Calculate pIalpha by using the two intermediate products 1 and 2 */ + *pIalpha = __QSUB(product1, product2); + + /* Calculate pIbeta by using the two intermediate products 3 and 4 */ + *pIbeta = __QADD(product4, product3); + } + + /** + * @} end of Inverse park group + */ + + + /** + * @ingroup groupInterpolation + */ + + /** + * @defgroup LinearInterpolate Linear Interpolation + * + * Linear interpolation is a method of curve fitting using linear polynomials. + * Linear interpolation works by effectively drawing a straight line between two neighboring samples and returning the appropriate point along that line + * + * \par + * \image html LinearInterp.gif "Linear interpolation" + * + * \par + * A Linear Interpolate function calculates an output value(y), for the input(x) + * using linear interpolation of the input values x0, x1( nearest input values) and the output values y0 and y1(nearest output values) + * + * \par Algorithm: + *
+   *       y = y0 + (x - x0) * ((y1 - y0)/(x1-x0))
+   *       where x0, x1 are nearest values of input x
+   *             y0, y1 are nearest values to output y
+   * 
+   *
+   * \par
+   * This set of functions implements the linear interpolation process
+   * for Q7, Q15, Q31, and floating-point data types. The functions operate on a single
+   * sample of data and each call to the function returns a single processed value.
+   * S points to an instance of the Linear Interpolate function data structure.
+   * x is the input sample value. The function returns the output value.
+   *
+   * \par
+   * If x is outside the table boundary, linear interpolation returns the first value
+   * of the table when x is below the input range and the last value of the table when
+   * x is above the range.
+   */
+
+  /**
+   * @addtogroup LinearInterpolate
+   * @{
+   */
+
+  /**
+   * @brief  Process function for the floating-point Linear Interpolation Function.
+   * @param[in,out] S  is an instance of the floating-point Linear Interpolation structure
+   * @param[in]     x  input sample to process
+   * @return y processed output sample.
+   *
+   */
+  __STATIC_FORCEINLINE float32_t arm_linear_interp_f32(
+  arm_linear_interp_instance_f32 * S,
+  float32_t x)
+  {
+    float32_t y;
+    float32_t x0, x1;                            /* Nearest input values */
+    float32_t y0, y1;                            /* Nearest output values */
+    float32_t xSpacing = S->xSpacing;            /* spacing between input values */
+    int32_t i;                                   /* Index variable */
+    float32_t *pYData = S->pYData;               /* pointer to output table */
+
+    /* Calculation of index */
+    i = (int32_t) ((x - S->x1) / xSpacing);
+
+    if (i < 0)
+    {
+      /* Initialize output for input below the specified range to the first value of the table */
+      y = pYData[0];
+    }
+    else if ((uint32_t)i >= (S->nValues - 1))
+    {
+      /* Initialize output for input above the specified range to the last value of the table */
+      y = pYData[S->nValues - 1];
+    }
+    else
+    {
+      /* Calculation of nearest input values */
+      x0 = S->x1 +  i      * xSpacing;
+      x1 = S->x1 + (i + 1) * xSpacing;
+
+      /* Read of nearest output values */
+      y0 = pYData[i];
+      y1 = pYData[i + 1];
+
+      /* Calculation of output */
+      y = y0 + (x - x0) * ((y1 - y0) / (x1 - x0));
+
+    }
+
+    /* returns output value */
+    return (y);
+  }
+
+
+  /**
+   *
+   * @brief  Process function for the Q31 Linear Interpolation Function.
+   * @param[in] pYData   pointer to Q31 Linear Interpolation table
+   * @param[in] x        input sample to process
+   * @param[in] nValues  number of table values
+   * @return y processed output sample.
+   *
+   * \par
+   * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.
+   * This function can support maximum of table size 2^12.
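+   *
+   * \par Example
+   * Illustrative sketch added for this example; the table content is a placeholder.
+   * <pre>
+   *     q31_t table[4] = {0, 0x20000000, 0x40000000, 0x60000000};
+   *     // x selects table index 1 with a fractional part of 0.5 (12.20 format)
+   *     q31_t x = (1 << 20) | (1 << 19);
+   *     q31_t y = arm_linear_interp_q31(table, x, 4);   // approximately 0x30000000
+   * </pre>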
+ * + */ + __STATIC_FORCEINLINE q31_t arm_linear_interp_q31( + q31_t * pYData, + q31_t x, + uint32_t nValues) + { + q31_t y; /* output */ + q31_t y0, y1; /* Nearest output values */ + q31_t fract; /* fractional part */ + int32_t index; /* Index to read nearest output values */ + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + index = ((x & (q31_t)0xFFF00000) >> 20); + + if (index >= (int32_t)(nValues - 1)) + { + return (pYData[nValues - 1]); + } + else if (index < 0) + { + return (pYData[0]); + } + else + { + /* 20 bits for the fractional part */ + /* shift left by 11 to keep fract in 1.31 format */ + fract = (x & 0x000FFFFF) << 11; + + /* Read two nearest output values from the index in 1.31(q31) format */ + y0 = pYData[index]; + y1 = pYData[index + 1]; + + /* Calculation of y0 * (1-fract) and y is in 2.30 format */ + y = ((q31_t) ((q63_t) y0 * (0x7FFFFFFF - fract) >> 32)); + + /* Calculation of y0 * (1-fract) + y1 *fract and y is in 2.30 format */ + y += ((q31_t) (((q63_t) y1 * fract) >> 32)); + + /* Convert y to 1.31 format */ + return (y << 1U); + } + } + + + /** + * + * @brief Process function for the Q15 Linear Interpolation Function. + * @param[in] pYData pointer to Q15 Linear Interpolation table + * @param[in] x input sample to process + * @param[in] nValues number of table values + * @return y processed output sample. + * + * \par + * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part. + * This function can support maximum of table size 2^12. + * + */ + __STATIC_FORCEINLINE q15_t arm_linear_interp_q15( + q15_t * pYData, + q31_t x, + uint32_t nValues) + { + q63_t y; /* output */ + q15_t y0, y1; /* Nearest output values */ + q31_t fract; /* fractional part */ + int32_t index; /* Index to read nearest output values */ + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + index = ((x & (int32_t)0xFFF00000) >> 20); + + if (index >= (int32_t)(nValues - 1)) + { + return (pYData[nValues - 1]); + } + else if (index < 0) + { + return (pYData[0]); + } + else + { + /* 20 bits for the fractional part */ + /* fract is in 12.20 format */ + fract = (x & 0x000FFFFF); + + /* Read two nearest output values from the index */ + y0 = pYData[index]; + y1 = pYData[index + 1]; + + /* Calculation of y0 * (1-fract) and y is in 13.35 format */ + y = ((q63_t) y0 * (0xFFFFF - fract)); + + /* Calculation of (y0 * (1-fract) + y1 * fract) and y is in 13.35 format */ + y += ((q63_t) y1 * (fract)); + + /* convert y to 1.15 format */ + return (q15_t) (y >> 20); + } + } + + + /** + * + * @brief Process function for the Q7 Linear Interpolation Function. + * @param[in] pYData pointer to Q7 Linear Interpolation table + * @param[in] x input sample to process + * @param[in] nValues number of table values + * @return y processed output sample. + * + * \par + * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part. + * This function can support maximum of table size 2^12. 
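+   *
+   * \par Example
+   * Illustrative sketch added for this example; the table content is a placeholder.
+   * <pre>
+   *     q7_t table[3] = {0, 64, 127};
+   *     q31_t x = (1 << 20) | (1 << 19);                // index 1, fraction 0.5 (12.20 format)
+   *     q7_t y = arm_linear_interp_q7(table, x, 3);     // approximately 95
+   * </pre>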
+ */ + __STATIC_FORCEINLINE q7_t arm_linear_interp_q7( + q7_t * pYData, + q31_t x, + uint32_t nValues) + { + q31_t y; /* output */ + q7_t y0, y1; /* Nearest output values */ + q31_t fract; /* fractional part */ + uint32_t index; /* Index to read nearest output values */ + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + if (x < 0) + { + return (pYData[0]); + } + index = (x >> 20) & 0xfff; + + if (index >= (nValues - 1)) + { + return (pYData[nValues - 1]); + } + else + { + /* 20 bits for the fractional part */ + /* fract is in 12.20 format */ + fract = (x & 0x000FFFFF); + + /* Read two nearest output values from the index and are in 1.7(q7) format */ + y0 = pYData[index]; + y1 = pYData[index + 1]; + + /* Calculation of y0 * (1-fract ) and y is in 13.27(q27) format */ + y = ((y0 * (0xFFFFF - fract))); + + /* Calculation of y1 * fract + y0 * (1-fract) and y is in 13.27(q27) format */ + y += (y1 * fract); + + /* convert y to 1.7(q7) format */ + return (q7_t) (y >> 20); + } + } + + /** + * @} end of LinearInterpolate group + */ + + /** + * @brief Fast approximation to the trigonometric sine function for floating-point data. + * @param[in] x input value in radians. + * @return sin(x). + */ + float32_t arm_sin_f32( + float32_t x); + + + /** + * @brief Fast approximation to the trigonometric sine function for Q31 data. + * @param[in] x Scaled input value in radians. + * @return sin(x). + */ + q31_t arm_sin_q31( + q31_t x); + + + /** + * @brief Fast approximation to the trigonometric sine function for Q15 data. + * @param[in] x Scaled input value in radians. + * @return sin(x). + */ + q15_t arm_sin_q15( + q15_t x); + + + /** + * @brief Fast approximation to the trigonometric cosine function for floating-point data. + * @param[in] x input value in radians. + * @return cos(x). + */ + float32_t arm_cos_f32( + float32_t x); + + + /** + * @brief Fast approximation to the trigonometric cosine function for Q31 data. + * @param[in] x Scaled input value in radians. + * @return cos(x). + */ + q31_t arm_cos_q31( + q31_t x); + + + /** + * @brief Fast approximation to the trigonometric cosine function for Q15 data. + * @param[in] x Scaled input value in radians. + * @return cos(x). + */ + q15_t arm_cos_q15( + q15_t x); + + +/** + @brief Floating-point vector of log values. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + void arm_vlog_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + +/** + @brief Floating-point vector of exp values. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + void arm_vexp_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + /** + * @ingroup groupFastMath + */ + + + /** + * @defgroup SQRT Square Root + * + * Computes the square root of a number. + * There are separate functions for Q15, Q31, and floating-point data types. + * The square root function is computed using the Newton-Raphson algorithm. + * This is an iterative algorithm of the form: + *
+   *      x1 = x0 - f(x0)/f'(x0)
+   * 
+ * where x1 is the current estimate, + * x0 is the previous estimate, and + * f'(x0) is the derivative of f() evaluated at x0. + * For the square root function, the algorithm reduces to: + *
+   *     x0 = in/2                         [initial guess]
+   *     x1 = 1/2 * ( x0 + in / x0)        [each iteration]
+   * 
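+   *
+   * \par Example
+   * Illustrative sketch added for this example, showing the status-checked
+   * floating-point variant; the input value is a placeholder.
+   * <pre>
+   *     float32_t out;
+   *     if (arm_sqrt_f32(2.0f, &out) == ARM_MATH_SUCCESS)
+   *     {
+   *       // out is approximately 1.41421356f
+   *     }
+   * </pre>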
+ */ + + + /** + * @addtogroup SQRT + * @{ + */ + +/** + @brief Floating-point square root function. + @param[in] in input value + @param[out] pOut square root of input value + @return execution status + - \ref ARM_MATH_SUCCESS : input value is positive + - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 + */ +__STATIC_FORCEINLINE arm_status arm_sqrt_f32( + float32_t in, + float32_t * pOut) + { + if (in >= 0.0f) + { +#if defined ( __CC_ARM ) + #if defined __TARGET_FPU_VFP + *pOut = __sqrtf(in); + #else + *pOut = sqrtf(in); + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + __ASM("VSQRT.F32 %0,%1" : "=t"(*pOut) : "t"(in)); + #else + *pOut = sqrtf(in); + #endif + +#else + *pOut = sqrtf(in); +#endif + + return (ARM_MATH_SUCCESS); + } + else + { + *pOut = 0.0f; + return (ARM_MATH_ARGUMENT_ERROR); + } + } + + +/** + @brief Q31 square root function. + @param[in] in input value. The range of the input value is [0 +1) or 0x00000000 to 0x7FFFFFFF + @param[out] pOut points to square root of input value + @return execution status + - \ref ARM_MATH_SUCCESS : input value is positive + - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 + */ +arm_status arm_sqrt_q31( + q31_t in, + q31_t * pOut); + + +/** + @brief Q15 square root function. + @param[in] in input value. The range of the input value is [0 +1) or 0x0000 to 0x7FFF + @param[out] pOut points to square root of input value + @return execution status + - \ref ARM_MATH_SUCCESS : input value is positive + - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 + */ +arm_status arm_sqrt_q15( + q15_t in, + q15_t * pOut); + + /** + * @brief Vector Floating-point square root function. + * @param[in] pIn input vector. + * @param[out] pOut vector of square roots of input elements. + * @param[in] len length of input vector. + * @return The function returns ARM_MATH_SUCCESS if input value is positive value or ARM_MATH_ARGUMENT_ERROR if + * in is negative value and returns zero output for negative values. + */ + void arm_vsqrt_f32( + float32_t * pIn, + float32_t * pOut, + uint16_t len); + + void arm_vsqrt_q31( + q31_t * pIn, + q31_t * pOut, + uint16_t len); + + void arm_vsqrt_q15( + q15_t * pIn, + q15_t * pOut, + uint16_t len); + + /** + * @} end of SQRT group + */ + + + /** + * @brief floating-point Circular write function. + */ + __STATIC_FORCEINLINE void arm_circularWrite_f32( + int32_t * circBuffer, + int32_t L, + uint16_t * writeOffset, + int32_t bufferInc, + const int32_t * src, + int32_t srcInc, + uint32_t blockSize) + { + uint32_t i = 0U; + int32_t wOffset; + + /* Copy the value of Index pointer that points + * to the current location where the input samples to be copied */ + wOffset = *writeOffset; + + /* Loop over the blockSize */ + i = blockSize; + + while (i > 0U) + { + /* copy the input sample to the circular buffer */ + circBuffer[wOffset] = *src; + + /* Update the input pointer */ + src += srcInc; + + /* Circularly update wOffset. Watch out for positive and negative value */ + wOffset += bufferInc; + if (wOffset >= L) + wOffset -= L; + + /* Decrement the loop counter */ + i--; + } + + /* Update the index pointer */ + *writeOffset = (uint16_t)wOffset; + } + + + + /** + * @brief floating-point Circular Read function. 
+ */ + __STATIC_FORCEINLINE void arm_circularRead_f32( + int32_t * circBuffer, + int32_t L, + int32_t * readOffset, + int32_t bufferInc, + int32_t * dst, + int32_t * dst_base, + int32_t dst_length, + int32_t dstInc, + uint32_t blockSize) + { + uint32_t i = 0U; + int32_t rOffset; + int32_t* dst_end; + + /* Copy the value of Index pointer that points + * to the current location from where the input samples to be read */ + rOffset = *readOffset; + dst_end = dst_base + dst_length; + + /* Loop over the blockSize */ + i = blockSize; + + while (i > 0U) + { + /* copy the sample from the circular buffer to the destination buffer */ + *dst = circBuffer[rOffset]; + + /* Update the input pointer */ + dst += dstInc; + + if (dst == dst_end) + { + dst = dst_base; + } + + /* Circularly update rOffset. Watch out for positive and negative value */ + rOffset += bufferInc; + + if (rOffset >= L) + { + rOffset -= L; + } + + /* Decrement the loop counter */ + i--; + } + + /* Update the index pointer */ + *readOffset = rOffset; + } + + + /** + * @brief Q15 Circular write function. + */ + __STATIC_FORCEINLINE void arm_circularWrite_q15( + q15_t * circBuffer, + int32_t L, + uint16_t * writeOffset, + int32_t bufferInc, + const q15_t * src, + int32_t srcInc, + uint32_t blockSize) + { + uint32_t i = 0U; + int32_t wOffset; + + /* Copy the value of Index pointer that points + * to the current location where the input samples to be copied */ + wOffset = *writeOffset; + + /* Loop over the blockSize */ + i = blockSize; + + while (i > 0U) + { + /* copy the input sample to the circular buffer */ + circBuffer[wOffset] = *src; + + /* Update the input pointer */ + src += srcInc; + + /* Circularly update wOffset. Watch out for positive and negative value */ + wOffset += bufferInc; + if (wOffset >= L) + wOffset -= L; + + /* Decrement the loop counter */ + i--; + } + + /* Update the index pointer */ + *writeOffset = (uint16_t)wOffset; + } + + + /** + * @brief Q15 Circular Read function. + */ + __STATIC_FORCEINLINE void arm_circularRead_q15( + q15_t * circBuffer, + int32_t L, + int32_t * readOffset, + int32_t bufferInc, + q15_t * dst, + q15_t * dst_base, + int32_t dst_length, + int32_t dstInc, + uint32_t blockSize) + { + uint32_t i = 0; + int32_t rOffset; + q15_t* dst_end; + + /* Copy the value of Index pointer that points + * to the current location from where the input samples to be read */ + rOffset = *readOffset; + + dst_end = dst_base + dst_length; + + /* Loop over the blockSize */ + i = blockSize; + + while (i > 0U) + { + /* copy the sample from the circular buffer to the destination buffer */ + *dst = circBuffer[rOffset]; + + /* Update the input pointer */ + dst += dstInc; + + if (dst == dst_end) + { + dst = dst_base; + } + + /* Circularly update wOffset. Watch out for positive and negative value */ + rOffset += bufferInc; + + if (rOffset >= L) + { + rOffset -= L; + } + + /* Decrement the loop counter */ + i--; + } + + /* Update the index pointer */ + *readOffset = rOffset; + } + + + /** + * @brief Q7 Circular write function. 
+ */ + __STATIC_FORCEINLINE void arm_circularWrite_q7( + q7_t * circBuffer, + int32_t L, + uint16_t * writeOffset, + int32_t bufferInc, + const q7_t * src, + int32_t srcInc, + uint32_t blockSize) + { + uint32_t i = 0U; + int32_t wOffset; + + /* Copy the value of Index pointer that points + * to the current location where the input samples to be copied */ + wOffset = *writeOffset; + + /* Loop over the blockSize */ + i = blockSize; + + while (i > 0U) + { + /* copy the input sample to the circular buffer */ + circBuffer[wOffset] = *src; + + /* Update the input pointer */ + src += srcInc; + + /* Circularly update wOffset. Watch out for positive and negative value */ + wOffset += bufferInc; + if (wOffset >= L) + wOffset -= L; + + /* Decrement the loop counter */ + i--; + } + + /* Update the index pointer */ + *writeOffset = (uint16_t)wOffset; + } + + + /** + * @brief Q7 Circular Read function. + */ + __STATIC_FORCEINLINE void arm_circularRead_q7( + q7_t * circBuffer, + int32_t L, + int32_t * readOffset, + int32_t bufferInc, + q7_t * dst, + q7_t * dst_base, + int32_t dst_length, + int32_t dstInc, + uint32_t blockSize) + { + uint32_t i = 0; + int32_t rOffset; + q7_t* dst_end; + + /* Copy the value of Index pointer that points + * to the current location from where the input samples to be read */ + rOffset = *readOffset; + + dst_end = dst_base + dst_length; + + /* Loop over the blockSize */ + i = blockSize; + + while (i > 0U) + { + /* copy the sample from the circular buffer to the destination buffer */ + *dst = circBuffer[rOffset]; + + /* Update the input pointer */ + dst += dstInc; + + if (dst == dst_end) + { + dst = dst_base; + } + + /* Circularly update rOffset. Watch out for positive and negative value */ + rOffset += bufferInc; + + if (rOffset >= L) + { + rOffset -= L; + } + + /* Decrement the loop counter */ + i--; + } + + /* Update the index pointer */ + *readOffset = rOffset; + } + + + /** + * @brief Sum of the squares of the elements of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_power_q31( + const q31_t * pSrc, + uint32_t blockSize, + q63_t * pResult); + + + /** + * @brief Sum of the squares of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_power_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + + + /** + * @brief Sum of the squares of the elements of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_power_q15( + const q15_t * pSrc, + uint32_t blockSize, + q63_t * pResult); + + + /** + * @brief Sum of the squares of the elements of a Q7 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_power_q7( + const q7_t * pSrc, + uint32_t blockSize, + q31_t * pResult); + + + /** + * @brief Mean value of a Q7 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_mean_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult); + + + /** + * @brief Mean value of a Q15 vector. 
+ * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_mean_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult); + + + /** + * @brief Mean value of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_mean_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult); + + + /** + * @brief Mean value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_mean_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + + + /** + * @brief Variance of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_var_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + + + /** + * @brief Variance of the elements of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_var_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult); + + + /** + * @brief Variance of the elements of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_var_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult); + + + /** + * @brief Root Mean Square of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_rms_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + + + /** + * @brief Root Mean Square of the elements of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_rms_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult); + + + /** + * @brief Root Mean Square of the elements of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_rms_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult); + + + /** + * @brief Standard deviation of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_std_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + + + /** + * @brief Standard deviation of the elements of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_std_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult); + + + /** + * @brief Standard deviation of the elements of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. 
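+   *
+   * \par Example
+   * Illustrative sketch added for this example, combining this kernel with the related
+   * Q15 statistics functions declared above; the sample block is a placeholder.
+   * <pre>
+   *     q15_t samples[8] = {100, 200, -300, 400, 0, 250, -150, 50};
+   *     q15_t mean, var, std;
+   *     arm_mean_q15(samples, 8, &mean);
+   *     arm_var_q15 (samples, 8, &var);
+   *     arm_std_q15 (samples, 8, &std);
+   * </pre>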
+ */ + void arm_std_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult); + + + /** + * @brief Floating-point complex magnitude + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_f32( + const float32_t * pSrc, + float32_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q31 complex magnitude + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q15 complex magnitude + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q15 complex dot product + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] numSamples number of complex samples in each vector + * @param[out] realResult real part of the result returned here + * @param[out] imagResult imaginary part of the result returned here + */ + void arm_cmplx_dot_prod_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + uint32_t numSamples, + q31_t * realResult, + q31_t * imagResult); + + + /** + * @brief Q31 complex dot product + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] numSamples number of complex samples in each vector + * @param[out] realResult real part of the result returned here + * @param[out] imagResult imaginary part of the result returned here + */ + void arm_cmplx_dot_prod_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + uint32_t numSamples, + q63_t * realResult, + q63_t * imagResult); + + + /** + * @brief Floating-point complex dot product + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] numSamples number of complex samples in each vector + * @param[out] realResult real part of the result returned here + * @param[out] imagResult imaginary part of the result returned here + */ + void arm_cmplx_dot_prod_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t numSamples, + float32_t * realResult, + float32_t * imagResult); + + + /** + * @brief Q15 complex-by-real multiplication + * @param[in] pSrcCmplx points to the complex input vector + * @param[in] pSrcReal points to the real input vector + * @param[out] pCmplxDst points to the complex output vector + * @param[in] numSamples number of samples in each vector + */ + void arm_cmplx_mult_real_q15( + const q15_t * pSrcCmplx, + const q15_t * pSrcReal, + q15_t * pCmplxDst, + uint32_t numSamples); + + + /** + * @brief Q31 complex-by-real multiplication + * @param[in] pSrcCmplx points to the complex input vector + * @param[in] pSrcReal points to the real input vector + * @param[out] pCmplxDst points to the complex output vector + * @param[in] numSamples number of samples in each vector + */ + void arm_cmplx_mult_real_q31( + const q31_t * pSrcCmplx, + const q31_t * pSrcReal, + q31_t * pCmplxDst, + uint32_t numSamples); + + + /** + * @brief Floating-point complex-by-real multiplication + * @param[in] pSrcCmplx 
points to the complex input vector + * @param[in] pSrcReal points to the real input vector + * @param[out] pCmplxDst points to the complex output vector + * @param[in] numSamples number of samples in each vector + */ + void arm_cmplx_mult_real_f32( + const float32_t * pSrcCmplx, + const float32_t * pSrcReal, + float32_t * pCmplxDst, + uint32_t numSamples); + + + /** + * @brief Minimum value of a Q7 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] result is output pointer + * @param[in] index is the array index of the minimum value in the input buffer. + */ + void arm_min_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * result, + uint32_t * index); + + + /** + * @brief Minimum value of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[in] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_min_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex); + + + /** + * @brief Minimum value of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_min_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex); + + + /** + * @brief Minimum value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_min_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex); + + +/** + * @brief Maximum value of a Q7 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_max_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex); + + +/** + * @brief Maximum value of a Q15 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_max_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex); + + +/** + * @brief Maximum value of a Q31 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_max_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex); + + +/** + * @brief Maximum value of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_max_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex); + + /** + @brief Maximum value of a floating-point vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + void arm_max_no_idx_f32( + const float32_t *pSrc, + uint32_t blockSize, + float32_t *pResult); + + /** + * @brief Q15 complex-by-complex multiplication + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ + void arm_cmplx_mult_cmplx_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + q15_t * pDst, + uint32_t numSamples); + + + /** + * @brief Q31 complex-by-complex multiplication + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ + void arm_cmplx_mult_cmplx_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + q31_t * pDst, + uint32_t numSamples); + + + /** + * @brief Floating-point complex-by-complex multiplication + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ + void arm_cmplx_mult_cmplx_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + float32_t * pDst, + uint32_t numSamples); + + + /** + * @brief Converts the elements of the floating-point vector to Q31 vector. + * @param[in] pSrc points to the floating-point input vector + * @param[out] pDst points to the Q31 output vector + * @param[in] blockSize length of the input vector + */ + void arm_float_to_q31( + const float32_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the floating-point vector to Q15 vector. + * @param[in] pSrc points to the floating-point input vector + * @param[out] pDst points to the Q15 output vector + * @param[in] blockSize length of the input vector + */ + void arm_float_to_q15( + const float32_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the floating-point vector to Q7 vector. + * @param[in] pSrc points to the floating-point input vector + * @param[out] pDst points to the Q7 output vector + * @param[in] blockSize length of the input vector + */ + void arm_float_to_q7( + const float32_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q31 vector to floating-point vector. + * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q31_to_float( + const q31_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q31 vector to Q15 vector. + * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q31_to_q15( + const q31_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q31 vector to Q7 vector. 
+ * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q31_to_q7( + const q31_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q15 vector to floating-point vector. + * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q15_to_float( + const q15_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q15 vector to Q31 vector. + * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q15_to_q31( + const q15_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q15 vector to Q7 vector. + * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q15_to_q7( + const q15_t * pSrc, + q7_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q7 vector to floating-point vector. + * @param[in] pSrc is input pointer + * @param[out] pDst is output pointer + * @param[in] blockSize is the number of samples to process + */ + void arm_q7_to_float( + const q7_t * pSrc, + float32_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q7 vector to Q31 vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_q7_to_q31( + const q7_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + + /** + * @brief Converts the elements of the Q7 vector to Q15 vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_q7_to_q15( + const q7_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + +/** + * @brief Struct for specifying SVM Kernel + */ +typedef enum +{ + ARM_ML_KERNEL_LINEAR = 0, + /**< Linear kernel */ + ARM_ML_KERNEL_POLYNOMIAL = 1, + /**< Polynomial kernel */ + ARM_ML_KERNEL_RBF = 2, + /**< Radial Basis Function kernel */ + ARM_ML_KERNEL_SIGMOID = 3 + /**< Sigmoid kernel */ +} arm_ml_kernel_type; + + +/** + * @brief Instance structure for linear SVM prediction function. + */ +typedef struct +{ + uint32_t nbOfSupportVectors; /**< Number of support vectors */ + uint32_t vectorDimension; /**< Dimension of vector space */ + float32_t intercept; /**< Intercept */ + const float32_t *dualCoefficients; /**< Dual coefficients */ + const float32_t *supportVectors; /**< Support vectors */ + const int32_t *classes; /**< The two SVM classes */ +} arm_svm_linear_instance_f32; + + +/** + * @brief Instance structure for polynomial SVM prediction function. + */ +typedef struct +{ + uint32_t nbOfSupportVectors; /**< Number of support vectors */ + uint32_t vectorDimension; /**< Dimension of vector space */ + float32_t intercept; /**< Intercept */ + const float32_t *dualCoefficients; /**< Dual coefficients */ + const float32_t *supportVectors; /**< Support vectors */ + const int32_t *classes; /**< The two SVM classes */ + int32_t degree; /**< Polynomial degree */ + float32_t coef0; /**< Polynomial constant */ + float32_t gamma; /**< Gamma factor */ +} arm_svm_polynomial_instance_f32; + +/** + * @brief Instance structure for rbf SVM prediction function. 
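+ *
+ * \par Example
+ * Illustrative sketch added for this example, showing typical use of this instance
+ * through the init and predict functions declared below; all numeric values and array
+ * sizes are placeholders.
+ * <pre>
+ *     static const float32_t dualCoefs[2]  = {0.5f, -0.5f};
+ *     static const float32_t supVectors[4] = {0.1f, 0.2f, 0.3f, 0.4f};  // 2 vectors of dimension 2
+ *     static const int32_t   classes[2]    = {0, 1};
+ *     arm_svm_rbf_instance_f32 S;
+ *     arm_svm_rbf_init_f32(&S, 2, 2, -0.1f, dualCoefs, supVectors, classes, 0.5f);
+ *     float32_t sample[2] = {0.25f, 0.15f};
+ *     int32_t predictedClass;
+ *     arm_svm_rbf_predict_f32(&S, sample, &predictedClass);
+ * </pre>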
+ */
+typedef struct
+{
+  uint32_t        nbOfSupportVectors;     /**< Number of support vectors */
+  uint32_t        vectorDimension;        /**< Dimension of vector space */
+  float32_t       intercept;              /**< Intercept */
+  const float32_t *dualCoefficients;      /**< Dual coefficients */
+  const float32_t *supportVectors;        /**< Support vectors */
+  const int32_t   *classes;               /**< The two SVM classes */
+  float32_t       gamma;                  /**< Gamma factor */
+} arm_svm_rbf_instance_f32;
+
+/**
+ * @brief Instance structure for sigmoid SVM prediction function.
+ */
+typedef struct
+{
+  uint32_t        nbOfSupportVectors;     /**< Number of support vectors */
+  uint32_t        vectorDimension;        /**< Dimension of vector space */
+  float32_t       intercept;              /**< Intercept */
+  const float32_t *dualCoefficients;      /**< Dual coefficients */
+  const float32_t *supportVectors;        /**< Support vectors */
+  const int32_t   *classes;               /**< The two SVM classes */
+  float32_t       coef0;                  /**< Independent constant */
+  float32_t       gamma;                  /**< Gamma factor */
+} arm_svm_sigmoid_instance_f32;
+
+/**
+ * @brief        SVM linear instance init function
+ * @param[in]    S                  Parameters for SVM functions
+ * @param[in]    nbOfSupportVectors Number of support vectors
+ * @param[in]    vectorDimension    Dimension of vector space
+ * @param[in]    intercept          Intercept
+ * @param[in]    dualCoefficients   Array of dual coefficients
+ * @param[in]    supportVectors     Array of support vectors
+ * @param[in]    classes            Array of 2 classes ID
+ * @return none.
+ *
+ */
+
+
+void arm_svm_linear_init_f32(arm_svm_linear_instance_f32 *S,
+  uint32_t nbOfSupportVectors,
+  uint32_t vectorDimension,
+  float32_t intercept,
+  const float32_t *dualCoefficients,
+  const float32_t *supportVectors,
+  const int32_t *classes);
+
+/**
+ * @brief SVM linear prediction
+ * @param[in]  S        Pointer to an instance of the linear SVM structure.
+ * @param[in]  in       Pointer to input vector
+ * @param[out] pResult  Decision value
+ * @return none.
+ *
+ */
+
+void arm_svm_linear_predict_f32(const arm_svm_linear_instance_f32 *S,
+  const float32_t * in,
+  int32_t * pResult);
+
+
+/**
+ * @brief        SVM polynomial instance init function
+ * @param[in]    S                  points to an instance of the polynomial SVM structure.
+ * @param[in]    nbOfSupportVectors Number of support vectors
+ * @param[in]    vectorDimension    Dimension of vector space
+ * @param[in]    intercept          Intercept
+ * @param[in]    dualCoefficients   Array of dual coefficients
+ * @param[in]    supportVectors     Array of support vectors
+ * @param[in]    classes            Array of 2 classes ID
+ * @param[in]    degree             Polynomial degree
+ * @param[in]    coef0              coeff0 (scikit-learn terminology)
+ * @param[in]    gamma              gamma (scikit-learn terminology)
+ * @return none.
+ *
+ */
+
+
+void arm_svm_polynomial_init_f32(arm_svm_polynomial_instance_f32 *S,
+  uint32_t nbOfSupportVectors,
+  uint32_t vectorDimension,
+  float32_t intercept,
+  const float32_t *dualCoefficients,
+  const float32_t *supportVectors,
+  const int32_t *classes,
+  int32_t degree,
+  float32_t coef0,
+  float32_t gamma
+  );
+
+/**
+ * @brief SVM polynomial prediction
+ * @param[in]  S        Pointer to an instance of the polynomial SVM structure.
+ * @param[in]  in       Pointer to input vector
+ * @param[out] pResult  Decision value
+ * @return none.
+ *
+ */
+void arm_svm_polynomial_predict_f32(const arm_svm_polynomial_instance_f32 *S,
+  const float32_t * in,
+  int32_t * pResult);
+
+
+/**
+ * @brief        SVM radial basis function instance init function
+ * @param[in]    S                  points to an instance of the rbf SVM structure.
+ * @param[in]    nbOfSupportVectors Number of support vectors
+ * @param[in]    vectorDimension    Dimension of vector space
+ * @param[in]    intercept          Intercept
+ * @param[in]    dualCoefficients   Array of dual coefficients
+ * @param[in]    supportVectors     Array of support vectors
+ * @param[in]    classes            Array of 2 classes ID
+ * @param[in]    gamma              gamma (scikit-learn terminology)
+ * @return none.
+ *
+ */
+
+void arm_svm_rbf_init_f32(arm_svm_rbf_instance_f32 *S,
+  uint32_t nbOfSupportVectors,
+  uint32_t vectorDimension,
+  float32_t intercept,
+  const float32_t *dualCoefficients,
+  const float32_t *supportVectors,
+  const int32_t *classes,
+  float32_t gamma
+  );
+
+/**
+ * @brief SVM rbf prediction
+ * @param[in]  S        Pointer to an instance of the rbf SVM structure.
+ * @param[in]  in       Pointer to input vector
+ * @param[out] pResult  Decision value
+ * @return none.
+ *
+ */
+void arm_svm_rbf_predict_f32(const arm_svm_rbf_instance_f32 *S,
+  const float32_t * in,
+  int32_t * pResult);
+
+/**
+ * @brief        SVM sigmoid instance init function
+ * @param[in]    S                  points to an instance of the sigmoid SVM structure.
+ * @param[in]    nbOfSupportVectors Number of support vectors
+ * @param[in]    vectorDimension    Dimension of vector space
+ * @param[in]    intercept          Intercept
+ * @param[in]    dualCoefficients   Array of dual coefficients
+ * @param[in]    supportVectors     Array of support vectors
+ * @param[in]    classes            Array of 2 classes ID
+ * @param[in]    coef0              coeff0 (scikit-learn terminology)
+ * @param[in]    gamma              gamma (scikit-learn terminology)
+ * @return none.
+ *
+ */
+
+void arm_svm_sigmoid_init_f32(arm_svm_sigmoid_instance_f32 *S,
+  uint32_t nbOfSupportVectors,
+  uint32_t vectorDimension,
+  float32_t intercept,
+  const float32_t *dualCoefficients,
+  const float32_t *supportVectors,
+  const int32_t *classes,
+  float32_t coef0,
+  float32_t gamma
+  );
+
+/**
+ * @brief SVM sigmoid prediction
+ * @param[in]  S        Pointer to an instance of the sigmoid SVM structure.
+ * @param[in]  in       Pointer to input vector
+ * @param[out] pResult  Decision value
+ * @return none.
+ *
+ */
+void arm_svm_sigmoid_predict_f32(const arm_svm_sigmoid_instance_f32 *S,
+  const float32_t * in,
+  int32_t * pResult);
+
+
+
+/**
+ * @brief Instance structure for Naive Gaussian Bayesian estimator.
+ */
+typedef struct
+{
+  uint32_t vectorDimension;      /**< Dimension of vector space */
+  uint32_t numberOfClasses;      /**< Number of different classes  */
+  const float32_t *theta;        /**< Mean values for the Gaussians */
+  const float32_t *sigma;        /**< Variances for the Gaussians */
+  const float32_t *classPriors;  /**< Class prior probabilities */
+  float32_t epsilon;             /**< Additive value to variances */
+} arm_gaussian_naive_bayes_instance_f32;
+
+/**
+ * @brief Naive Gaussian Bayesian Estimator
+ *
+ * @param[in]  S        points to a naive Bayes instance structure
+ * @param[in]  in       points to the elements of the input vector.
+ * @param[in]  pBuffer  points to a buffer of length numberOfClasses
+ * @return The predicted class
+ *
+ */
+
+
+uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S,
+  const float32_t * in,
+  float32_t *pBuffer);
+
+/**
+ * @brief Computation of the LogSumExp
+ *
+ * In probabilistic computations, the dynamic range of the probability values can be very
+ * wide because they come from Gaussian functions.
+ * To avoid underflow and overflow issues, the values are represented by their log.
+ * In this representation, multiplying the original exp values is easy: their logs are added.
+ * But adding the original exp values is requiring some special handling and it is the + * goal of the LogSumExp function. + * + * If the values are x1...xn, the function is computing: + * + * ln(exp(x1) + ... + exp(xn)) and the computation is done in such a way that + * rounding issues are minimised. + * + * The max xm of the values is extracted and the function is computing: + * xm + ln(exp(x1 - xm) + ... + exp(xn - xm)) + * + * @param[in] *in Pointer to an array of input values. + * @param[in] blockSize Number of samples in the input array. + * @return LogSumExp + * + */ + + +float32_t arm_logsumexp_f32(const float32_t *in, uint32_t blockSize); + +/** + * @brief Dot product with log arithmetic + * + * Vectors are containing the log of the samples + * + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] blockSize number of samples in each vector + * @param[in] pTmpBuffer temporary buffer of length blockSize + * @return The log of the dot product . + * + */ + + +float32_t arm_logsumexp_dot_prod_f32(const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t blockSize, + float32_t *pTmpBuffer); + +/** + * @brief Entropy + * + * @param[in] pSrcA Array of input values. + * @param[in] blockSize Number of samples in the input array. + * @return Entropy -Sum(p ln p) + * + */ + + +float32_t arm_entropy_f32(const float32_t * pSrcA,uint32_t blockSize); + + +/** + * @brief Entropy + * + * @param[in] pSrcA Array of input values. + * @param[in] blockSize Number of samples in the input array. + * @return Entropy -Sum(p ln p) + * + */ + + +float64_t arm_entropy_f64(const float64_t * pSrcA, uint32_t blockSize); + + +/** + * @brief Kullback-Leibler + * + * @param[in] pSrcA Pointer to an array of input values for probability distribution A. + * @param[in] pSrcB Pointer to an array of input values for probability distribution B. + * @param[in] blockSize Number of samples in the input array. + * @return Kullback-Leibler Divergence D(A || B) + * + */ +float32_t arm_kullback_leibler_f32(const float32_t * pSrcA + ,const float32_t * pSrcB + ,uint32_t blockSize); + + +/** + * @brief Kullback-Leibler + * + * @param[in] pSrcA Pointer to an array of input values for probability distribution A. + * @param[in] pSrcB Pointer to an array of input values for probability distribution B. + * @param[in] blockSize Number of samples in the input array. + * @return Kullback-Leibler Divergence D(A || B) + * + */ +float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, + const float64_t * pSrcB, + uint32_t blockSize); + + +/** + * @brief Weighted sum + * + * + * @param[in] *in Array of input values. + * @param[in] *weigths Weights + * @param[in] blockSize Number of samples in the input array. 
+ * @return Weighted sum + * + */ +float32_t arm_weighted_sum_f32(const float32_t *in + , const float32_t *weigths + , uint32_t blockSize); + + +/** + * @brief Barycenter + * + * + * @param[in] in List of vectors + * @param[in] weights Weights of the vectors + * @param[out] out Barycenter + * @param[in] nbVectors Number of vectors + * @param[in] vecDim Dimension of space (vector dimension) + * @return None + * + */ +void arm_barycenter_f32(const float32_t *in + , const float32_t *weights + , float32_t *out + , uint32_t nbVectors + , uint32_t vecDim); + +/** + * @brief Euclidean distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ + +float32_t arm_euclidean_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); + +/** + * @brief Bray-Curtis distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float32_t arm_braycurtis_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); + +/** + * @brief Canberra distance between two vectors + * + * This function may divide by zero when samples pA[i] and pB[i] are both zero. + * The result of the computation will be correct. So the division per zero may be + * ignored. + * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float32_t arm_canberra_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); + + +/** + * @brief Chebyshev distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float32_t arm_chebyshev_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); + + +/** + * @brief Cityblock (Manhattan) distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float32_t arm_cityblock_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); + +/** + * @brief Correlation distance between two vectors + * + * The input vectors are modified in place ! + * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float32_t arm_correlation_distance_f32(float32_t *pA,float32_t *pB, uint32_t blockSize); + +/** + * @brief Cosine distance between two vectors + * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ + +float32_t arm_cosine_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); + +/** + * @brief Jensen-Shannon distance between two vectors + * + * This function is assuming that elements of second vector are > 0 + * and 0 only when the corresponding element of first vector is 0. + * Otherwise the result of the computation does not make sense + * and for speed reasons, the cases returning NaN or Infinity are not + * managed. + * + * When the function is computing x log (x / y) with x 0 and y 0, + * it will compute the right value (0) but a division per zero will occur + * and shoudl be ignored in client code. 
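+ *
+ * \par
+ * A minimal usage sketch (array values and variable names chosen only for
+ * illustration; the two distributions are assumed to be normalised and to
+ * respect the constraint described above):
+ *   const float32_t pA[4] = {0.25f, 0.25f, 0.25f, 0.25f};
+ *   const float32_t pB[4] = {0.10f, 0.20f, 0.30f, 0.40f};
+ *   float32_t d = arm_jensenshannon_distance_f32(pA, pB, 4U);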
+ * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ + +float32_t arm_jensenshannon_distance_f32(const float32_t *pA,const float32_t *pB,uint32_t blockSize); + +/** + * @brief Minkowski distance between two vectors + * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] n Norm order (>= 2) + * @param[in] blockSize vector length + * @return distance + * + */ + + + +float32_t arm_minkowski_distance_f32(const float32_t *pA,const float32_t *pB, int32_t order, uint32_t blockSize); + +/** + * @brief Dice distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] order Distance order + * @param[in] blockSize Number of samples + * @return distance + * + */ + + +float32_t arm_dice_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Hamming distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_hamming_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Jaccard distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_jaccard_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Kulsinski distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_kulsinski_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Roger Stanimoto distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_rogerstanimoto_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Russell-Rao distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_russellrao_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Sokal-Michener distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_sokalmichener_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Sokal-Sneath distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number of booleans + * @return distance + * + */ + +float32_t arm_sokalsneath_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + +/** + * @brief Yule distance between two vectors + * + * @param[in] pA First vector of packed booleans + * @param[in] pB Second vector of packed booleans + * @param[in] numberOfBools Number 
of booleans + * @return distance + * + */ + +float32_t arm_yule_distance(const uint32_t *pA, const uint32_t *pB, uint32_t numberOfBools); + + + /** + * @ingroup groupInterpolation + */ + + /** + * @defgroup BilinearInterpolate Bilinear Interpolation + * + * Bilinear interpolation is an extension of linear interpolation applied to a two dimensional grid. + * The underlying function f(x, y) is sampled on a regular grid and the interpolation process + * determines values between the grid points. + * Bilinear interpolation is equivalent to two step linear interpolation, first in the x-dimension and then in the y-dimension. + * Bilinear interpolation is often used in image processing to rescale images. + * The CMSIS DSP library provides bilinear interpolation functions for Q7, Q15, Q31, and floating-point data types. + * + * Algorithm + * \par + * The instance structure used by the bilinear interpolation functions describes a two dimensional data table. + * For floating-point, the instance structure is defined as: + *
+   *   typedef struct
+   *   {
+   *     uint16_t numRows;
+   *     uint16_t numCols;
+   *     float32_t *pData;
+   *   } arm_bilinear_interp_instance_f32;
+   * 
+ * + * \par + * where numRows specifies the number of rows in the table; + * numCols specifies the number of columns in the table; + * and pData points to an array of size numRows*numCols values. + * The data table pTable is organized in row order and the supplied data values fall on integer indexes. + * That is, table element (x,y) is located at pTable[x + y*numCols] where x and y are integers. + * + * \par + * Let (x, y) specify the desired interpolation point. Then define: + *
+   *     XF = floor(x)
+   *     YF = floor(y)
+   * 
+ * \par + * The interpolated output point is computed as: + *
+   *  f(x, y) = f(XF, YF) * (1-(x-XF)) * (1-(y-YF))
+   *           + f(XF+1, YF) * (x-XF)*(1-(y-YF))
+   *           + f(XF, YF+1) * (1-(x-XF))*(y-YF)
+   *           + f(XF+1, YF+1) * (x-XF)*(y-YF)
+   * 
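+ *
+ * \par
+ * As a quick numerical check of the formula (example values chosen only for
+ * illustration): for x = 1.5 and y = 2.25 we have XF = 1 and YF = 2, so the
+ * output is
+ *   0.375 * f(1,2) + 0.375 * f(2,2) + 0.125 * f(1,3) + 0.125 * f(2,3)
+ * and the four weights sum to 1, as expected.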
+ * Note that the coordinates (x, y) contain integer and fractional components. + * The integer components specify which portion of the table to use while the + * fractional components control the interpolation processor. + * + * \par + * if (x,y) are outside of the table boundary, Bilinear interpolation returns zero output. + */ + + + /** + * @addtogroup BilinearInterpolate + * @{ + */ + + /** + * @brief Floating-point bilinear interpolation. + * @param[in,out] S points to an instance of the interpolation structure. + * @param[in] X interpolation coordinate. + * @param[in] Y interpolation coordinate. + * @return out interpolated value. + */ + __STATIC_FORCEINLINE float32_t arm_bilinear_interp_f32( + const arm_bilinear_interp_instance_f32 * S, + float32_t X, + float32_t Y) + { + float32_t out; + float32_t f00, f01, f10, f11; + float32_t *pData = S->pData; + int32_t xIndex, yIndex, index; + float32_t xdiff, ydiff; + float32_t b1, b2, b3, b4; + + xIndex = (int32_t) X; + yIndex = (int32_t) Y; + + /* Care taken for table outside boundary */ + /* Returns zero output when values are outside table boundary */ + if (xIndex < 0 || xIndex > (S->numCols - 2) || yIndex < 0 || yIndex > (S->numRows - 2)) + { + return (0); + } + + /* Calculation of index for two nearest points in X-direction */ + index = (xIndex ) + (yIndex ) * S->numCols; + + + /* Read two nearest points in X-direction */ + f00 = pData[index]; + f01 = pData[index + 1]; + + /* Calculation of index for two nearest points in Y-direction */ + index = (xIndex ) + (yIndex+1) * S->numCols; + + + /* Read two nearest points in Y-direction */ + f10 = pData[index]; + f11 = pData[index + 1]; + + /* Calculation of intermediate values */ + b1 = f00; + b2 = f01 - f00; + b3 = f10 - f00; + b4 = f00 - f01 - f10 + f11; + + /* Calculation of fractional part in X */ + xdiff = X - xIndex; + + /* Calculation of fractional part in Y */ + ydiff = Y - yIndex; + + /* Calculation of bi-linear interpolated output */ + out = b1 + b2 * xdiff + b3 * ydiff + b4 * xdiff * ydiff; + + /* return to application */ + return (out); + } + + + /** + * @brief Q31 bilinear interpolation. + * @param[in,out] S points to an instance of the interpolation structure. + * @param[in] X interpolation coordinate in 12.20 format. + * @param[in] Y interpolation coordinate in 12.20 format. + * @return out interpolated value. 
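+ *
+ * \par
+ * X and Y use a 12.20 fixed-point layout: the upper 12 bits select the table
+ * cell and the lower 20 bits hold the fractional position. For example
+ * (value chosen only for illustration), the coordinate 1.5 is passed as
+ * (q31_t)(1.5f * (1 << 20)) = 0x00180000.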
+ */ + __STATIC_FORCEINLINE q31_t arm_bilinear_interp_q31( + arm_bilinear_interp_instance_q31 * S, + q31_t X, + q31_t Y) + { + q31_t out; /* Temporary output */ + q31_t acc = 0; /* output */ + q31_t xfract, yfract; /* X, Y fractional parts */ + q31_t x1, x2, y1, y2; /* Nearest output values */ + int32_t rI, cI; /* Row and column indices */ + q31_t *pYData = S->pData; /* pointer to output table values */ + uint32_t nCols = S->numCols; /* num of rows */ + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + rI = ((X & (q31_t)0xFFF00000) >> 20); + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + cI = ((Y & (q31_t)0xFFF00000) >> 20); + + /* Care taken for table outside boundary */ + /* Returns zero output when values are outside table boundary */ + if (rI < 0 || rI > (S->numCols - 2) || cI < 0 || cI > (S->numRows - 2)) + { + return (0); + } + + /* 20 bits for the fractional part */ + /* shift left xfract by 11 to keep 1.31 format */ + xfract = (X & 0x000FFFFF) << 11U; + + /* Read two nearest output values from the index */ + x1 = pYData[(rI) + (int32_t)nCols * (cI) ]; + x2 = pYData[(rI) + (int32_t)nCols * (cI) + 1]; + + /* 20 bits for the fractional part */ + /* shift left yfract by 11 to keep 1.31 format */ + yfract = (Y & 0x000FFFFF) << 11U; + + /* Read two nearest output values from the index */ + y1 = pYData[(rI) + (int32_t)nCols * (cI + 1) ]; + y2 = pYData[(rI) + (int32_t)nCols * (cI + 1) + 1]; + + /* Calculation of x1 * (1-xfract ) * (1-yfract) and acc is in 3.29(q29) format */ + out = ((q31_t) (((q63_t) x1 * (0x7FFFFFFF - xfract)) >> 32)); + acc = ((q31_t) (((q63_t) out * (0x7FFFFFFF - yfract)) >> 32)); + + /* x2 * (xfract) * (1-yfract) in 3.29(q29) and adding to acc */ + out = ((q31_t) ((q63_t) x2 * (0x7FFFFFFF - yfract) >> 32)); + acc += ((q31_t) ((q63_t) out * (xfract) >> 32)); + + /* y1 * (1 - xfract) * (yfract) in 3.29(q29) and adding to acc */ + out = ((q31_t) ((q63_t) y1 * (0x7FFFFFFF - xfract) >> 32)); + acc += ((q31_t) ((q63_t) out * (yfract) >> 32)); + + /* y2 * (xfract) * (yfract) in 3.29(q29) and adding to acc */ + out = ((q31_t) ((q63_t) y2 * (xfract) >> 32)); + acc += ((q31_t) ((q63_t) out * (yfract) >> 32)); + + /* Convert acc to 1.31(q31) format */ + return ((q31_t)(acc << 2)); + } + + + /** + * @brief Q15 bilinear interpolation. + * @param[in,out] S points to an instance of the interpolation structure. + * @param[in] X interpolation coordinate in 12.20 format. + * @param[in] Y interpolation coordinate in 12.20 format. + * @return out interpolated value. 
+ */ + __STATIC_FORCEINLINE q15_t arm_bilinear_interp_q15( + arm_bilinear_interp_instance_q15 * S, + q31_t X, + q31_t Y) + { + q63_t acc = 0; /* output */ + q31_t out; /* Temporary output */ + q15_t x1, x2, y1, y2; /* Nearest output values */ + q31_t xfract, yfract; /* X, Y fractional parts */ + int32_t rI, cI; /* Row and column indices */ + q15_t *pYData = S->pData; /* pointer to output table values */ + uint32_t nCols = S->numCols; /* num of rows */ + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + rI = ((X & (q31_t)0xFFF00000) >> 20); + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + cI = ((Y & (q31_t)0xFFF00000) >> 20); + + /* Care taken for table outside boundary */ + /* Returns zero output when values are outside table boundary */ + if (rI < 0 || rI > (S->numCols - 2) || cI < 0 || cI > (S->numRows - 2)) + { + return (0); + } + + /* 20 bits for the fractional part */ + /* xfract should be in 12.20 format */ + xfract = (X & 0x000FFFFF); + + /* Read two nearest output values from the index */ + x1 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI) ]; + x2 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI) + 1]; + + /* 20 bits for the fractional part */ + /* yfract should be in 12.20 format */ + yfract = (Y & 0x000FFFFF); + + /* Read two nearest output values from the index */ + y1 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI + 1) ]; + y2 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI + 1) + 1]; + + /* Calculation of x1 * (1-xfract ) * (1-yfract) and acc is in 13.51 format */ + + /* x1 is in 1.15(q15), xfract in 12.20 format and out is in 13.35 format */ + /* convert 13.35 to 13.31 by right shifting and out is in 1.31 */ + out = (q31_t) (((q63_t) x1 * (0x0FFFFF - xfract)) >> 4U); + acc = ((q63_t) out * (0x0FFFFF - yfract)); + + /* x2 * (xfract) * (1-yfract) in 1.51 and adding to acc */ + out = (q31_t) (((q63_t) x2 * (0x0FFFFF - yfract)) >> 4U); + acc += ((q63_t) out * (xfract)); + + /* y1 * (1 - xfract) * (yfract) in 1.51 and adding to acc */ + out = (q31_t) (((q63_t) y1 * (0x0FFFFF - xfract)) >> 4U); + acc += ((q63_t) out * (yfract)); + + /* y2 * (xfract) * (yfract) in 1.51 and adding to acc */ + out = (q31_t) (((q63_t) y2 * (xfract)) >> 4U); + acc += ((q63_t) out * (yfract)); + + /* acc is in 13.51 format and down shift acc by 36 times */ + /* Convert out to 1.15 format */ + return ((q15_t)(acc >> 36)); + } + + + /** + * @brief Q7 bilinear interpolation. + * @param[in,out] S points to an instance of the interpolation structure. + * @param[in] X interpolation coordinate in 12.20 format. + * @param[in] Y interpolation coordinate in 12.20 format. + * @return out interpolated value. 
+ */ + __STATIC_FORCEINLINE q7_t arm_bilinear_interp_q7( + arm_bilinear_interp_instance_q7 * S, + q31_t X, + q31_t Y) + { + q63_t acc = 0; /* output */ + q31_t out; /* Temporary output */ + q31_t xfract, yfract; /* X, Y fractional parts */ + q7_t x1, x2, y1, y2; /* Nearest output values */ + int32_t rI, cI; /* Row and column indices */ + q7_t *pYData = S->pData; /* pointer to output table values */ + uint32_t nCols = S->numCols; /* num of rows */ + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + rI = ((X & (q31_t)0xFFF00000) >> 20); + + /* Input is in 12.20 format */ + /* 12 bits for the table index */ + /* Index value calculation */ + cI = ((Y & (q31_t)0xFFF00000) >> 20); + + /* Care taken for table outside boundary */ + /* Returns zero output when values are outside table boundary */ + if (rI < 0 || rI > (S->numCols - 2) || cI < 0 || cI > (S->numRows - 2)) + { + return (0); + } + + /* 20 bits for the fractional part */ + /* xfract should be in 12.20 format */ + xfract = (X & (q31_t)0x000FFFFF); + + /* Read two nearest output values from the index */ + x1 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI) ]; + x2 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI) + 1]; + + /* 20 bits for the fractional part */ + /* yfract should be in 12.20 format */ + yfract = (Y & (q31_t)0x000FFFFF); + + /* Read two nearest output values from the index */ + y1 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI + 1) ]; + y2 = pYData[((uint32_t)rI) + nCols * ((uint32_t)cI + 1) + 1]; + + /* Calculation of x1 * (1-xfract ) * (1-yfract) and acc is in 16.47 format */ + out = ((x1 * (0xFFFFF - xfract))); + acc = (((q63_t) out * (0xFFFFF - yfract))); + + /* x2 * (xfract) * (1-yfract) in 2.22 and adding to acc */ + out = ((x2 * (0xFFFFF - yfract))); + acc += (((q63_t) out * (xfract))); + + /* y1 * (1 - xfract) * (yfract) in 2.22 and adding to acc */ + out = ((y1 * (0xFFFFF - xfract))); + acc += (((q63_t) out * (yfract))); + + /* y2 * (xfract) * (yfract) in 2.22 and adding to acc */ + out = ((y2 * (yfract))); + acc += (((q63_t) out * (xfract))); + + /* acc in 16.47 format and down shift by 40 to convert to 1.7 format */ + return ((q7_t)(acc >> 40)); + } + + /** + * @} end of BilinearInterpolate group + */ + + +/* SMMLAR */ +#define multAcc_32x32_keep32_R(a, x, y) \ + a = (q31_t) (((((q63_t) a) << 32) + ((q63_t) x * y) + 0x80000000LL ) >> 32) + +/* SMMLSR */ +#define multSub_32x32_keep32_R(a, x, y) \ + a = (q31_t) (((((q63_t) a) << 32) - ((q63_t) x * y) + 0x80000000LL ) >> 32) + +/* SMMULR */ +#define mult_32x32_keep32_R(a, x, y) \ + a = (q31_t) (((q63_t) x * y + 0x80000000LL ) >> 32) + +/* SMMLA */ +#define multAcc_32x32_keep32(a, x, y) \ + a += (q31_t) (((q63_t) x * y) >> 32) + +/* SMMLS */ +#define multSub_32x32_keep32(a, x, y) \ + a -= (q31_t) (((q63_t) x * y) >> 32) + +/* SMMUL */ +#define mult_32x32_keep32(a, x, y) \ + a = (q31_t) (((q63_t) x * y ) >> 32) + + +#if defined ( __CC_ARM ) + /* Enter low optimization region - place directly above function definition */ + #if defined( __ARM_ARCH_7EM__ ) + #define LOW_OPTIMIZATION_ENTER \ + _Pragma ("push") \ + _Pragma ("O1") + #else + #define LOW_OPTIMIZATION_ENTER + #endif + + /* Exit low optimization region - place directly after end of function definition */ + #if defined ( __ARM_ARCH_7EM__ ) + #define LOW_OPTIMIZATION_EXIT \ + _Pragma ("pop") + #else + #define LOW_OPTIMIZATION_EXIT + #endif + + /* Enter low optimization region - place directly above function definition */ + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER 
+ + /* Exit low optimization region - place directly after end of function definition */ + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined (__ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) + #define LOW_OPTIMIZATION_ENTER + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( __GNUC__ ) + #define LOW_OPTIMIZATION_ENTER \ + __attribute__(( optimize("-O1") )) + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( __ICCARM__ ) + /* Enter low optimization region - place directly above function definition */ + #if defined ( __ARM_ARCH_7EM__ ) + #define LOW_OPTIMIZATION_ENTER \ + _Pragma ("optimize=low") + #else + #define LOW_OPTIMIZATION_ENTER + #endif + + /* Exit low optimization region - place directly after end of function definition */ + #define LOW_OPTIMIZATION_EXIT + + /* Enter low optimization region - place directly above function definition */ + #if defined ( __ARM_ARCH_7EM__ ) + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER \ + _Pragma ("optimize=low") + #else + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #endif + + /* Exit low optimization region - place directly after end of function definition */ + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( __TI_ARM__ ) + #define LOW_OPTIMIZATION_ENTER + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( __CSMC__ ) + #define LOW_OPTIMIZATION_ENTER + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( __TASKING__ ) + #define LOW_OPTIMIZATION_ENTER + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) + #define LOW_OPTIMIZATION_ENTER + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT +#endif + + + +/* Compiler specific diagnostic adjustment */ +#if defined ( __CC_ARM ) + +#elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) + +#elif defined ( __GNUC__ ) +#pragma GCC diagnostic pop + +#elif defined ( __ICCARM__ ) + +#elif defined ( __TI_ARM__ ) + +#elif defined ( __CSMC__ ) + +#elif defined ( __TASKING__ ) + +#elif defined ( _MSC_VER ) + +#else + #error Unknown compiler +#endif + +#ifdef __cplusplus +} +#endif + + +#endif /* _ARM_MATH_H */ + +/** + * + * End of file. + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_mve_tables.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_mve_tables.h new file mode 100755 index 00000000000..4d2c135ac6e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_mve_tables.h @@ -0,0 +1,235 @@ +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mve_tables.h + * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc + * used for MVE implementation only + * + * $Date: 08. January 2020 + * $Revision: V1.7.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #ifndef _ARM_MVE_TABLES_H + #define _ARM_MVE_TABLES_H + + #include "arm_math.h" + + + + + + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_16) || defined(ARM_TABLE_TWIDDLECOEF_F32_32) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_16_f32[2]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_16_f32[2]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_16_f32[2]; +extern float32_t rearranged_twiddle_stride1_16_f32[8]; +extern float32_t rearranged_twiddle_stride2_16_f32[8]; +extern float32_t rearranged_twiddle_stride3_16_f32[8]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_64) || defined(ARM_TABLE_TWIDDLECOEF_F32_128) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_64_f32[3]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_64_f32[3]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_64_f32[3]; +extern float32_t rearranged_twiddle_stride1_64_f32[40]; +extern float32_t rearranged_twiddle_stride2_64_f32[40]; +extern float32_t rearranged_twiddle_stride3_64_f32[40]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_256) || defined(ARM_TABLE_TWIDDLECOEF_F32_512) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_256_f32[4]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_256_f32[4]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_256_f32[4]; +extern float32_t rearranged_twiddle_stride1_256_f32[168]; +extern float32_t rearranged_twiddle_stride2_256_f32[168]; +extern float32_t rearranged_twiddle_stride3_256_f32[168]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_1024) || defined(ARM_TABLE_TWIDDLECOEF_F32_2048) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_f32[5]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_f32[5]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_f32[5]; +extern float32_t rearranged_twiddle_stride1_1024_f32[680]; +extern float32_t rearranged_twiddle_stride2_1024_f32[680]; +extern float32_t rearranged_twiddle_stride3_1024_f32[680]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_F32_4096) || defined(ARM_TABLE_TWIDDLECOEF_F32_8192) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_f32[6]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_f32[6]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_f32[6]; +extern float32_t rearranged_twiddle_stride1_4096_f32[2728]; +extern float32_t rearranged_twiddle_stride2_4096_f32[2728]; +extern float32_t rearranged_twiddle_stride3_4096_f32[2728]; +#endif + + +#endif /* !defined(ARM_DSP_CONFIG_TABLES) || 
defined(ARM_FFT_ALLOW_TABLES) */ + +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ + + + +#if defined(ARM_MATH_MVEI) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_16) || defined(ARM_TABLE_TWIDDLECOEF_Q31_32) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_16_q31[2]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_16_q31[2]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_16_q31[2]; +extern q31_t rearranged_twiddle_stride1_16_q31[8]; +extern q31_t rearranged_twiddle_stride2_16_q31[8]; +extern q31_t rearranged_twiddle_stride3_16_q31[8]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_64) || defined(ARM_TABLE_TWIDDLECOEF_Q31_128) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_64_q31[3]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_64_q31[3]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_64_q31[3]; +extern q31_t rearranged_twiddle_stride1_64_q31[40]; +extern q31_t rearranged_twiddle_stride2_64_q31[40]; +extern q31_t rearranged_twiddle_stride3_64_q31[40]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_256) || defined(ARM_TABLE_TWIDDLECOEF_Q31_512) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_256_q31[4]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_256_q31[4]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_256_q31[4]; +extern q31_t rearranged_twiddle_stride1_256_q31[168]; +extern q31_t rearranged_twiddle_stride2_256_q31[168]; +extern q31_t rearranged_twiddle_stride3_256_q31[168]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_1024) || defined(ARM_TABLE_TWIDDLECOEF_Q31_2048) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_q31[5]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_q31[5]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_q31[5]; +extern q31_t rearranged_twiddle_stride1_1024_q31[680]; +extern q31_t rearranged_twiddle_stride2_1024_q31[680]; +extern q31_t rearranged_twiddle_stride3_1024_q31[680]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_4096) || defined(ARM_TABLE_TWIDDLECOEF_Q31_8192) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_q31[6]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_q31[6]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_q31[6]; +extern q31_t rearranged_twiddle_stride1_4096_q31[2728]; +extern q31_t rearranged_twiddle_stride2_4096_q31[2728]; +extern q31_t rearranged_twiddle_stride3_4096_q31[2728]; +#endif + + +#endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */ + +#endif /* defined(ARM_MATH_MVEI) */ + + + +#if defined(ARM_MATH_MVEI) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_16) || defined(ARM_TABLE_TWIDDLECOEF_Q15_32) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_16_q15[2]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_16_q15[2]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_16_q15[2]; +extern q15_t rearranged_twiddle_stride1_16_q15[8]; +extern q15_t rearranged_twiddle_stride2_16_q15[8]; +extern q15_t rearranged_twiddle_stride3_16_q15[8]; +#endif + 
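+
+/*
+ * Illustrative example of the table configuration mechanism used throughout
+ * this file (flags shown for a GCC-style command line; assuming ARM_MATH_MVEI
+ * is enabled for this section): when ARM_DSP_CONFIG_TABLES is defined, only
+ * the table blocks whose guard macros are also defined are kept. Building with
+ *     -DARM_DSP_CONFIG_TABLES -DARM_FFT_ALLOW_TABLES -DARM_TABLE_TWIDDLECOEF_Q15_64
+ * keeps the 64-point Q15 twiddle factor declarations that follow, while the
+ * other Q15 table blocks in this file compile out.
+ */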
+#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_64) || defined(ARM_TABLE_TWIDDLECOEF_Q15_128) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_64_q15[3]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_64_q15[3]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_64_q15[3]; +extern q15_t rearranged_twiddle_stride1_64_q15[40]; +extern q15_t rearranged_twiddle_stride2_64_q15[40]; +extern q15_t rearranged_twiddle_stride3_64_q15[40]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_256) || defined(ARM_TABLE_TWIDDLECOEF_Q15_512) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_256_q15[4]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_256_q15[4]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_256_q15[4]; +extern q15_t rearranged_twiddle_stride1_256_q15[168]; +extern q15_t rearranged_twiddle_stride2_256_q15[168]; +extern q15_t rearranged_twiddle_stride3_256_q15[168]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_1024) || defined(ARM_TABLE_TWIDDLECOEF_Q15_2048) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_1024_q15[5]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_1024_q15[5]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_1024_q15[5]; +extern q15_t rearranged_twiddle_stride1_1024_q15[680]; +extern q15_t rearranged_twiddle_stride2_1024_q15[680]; +extern q15_t rearranged_twiddle_stride3_1024_q15[680]; +#endif + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_4096) || defined(ARM_TABLE_TWIDDLECOEF_Q15_8192) + +extern uint32_t rearranged_twiddle_tab_stride1_arr_4096_q15[6]; +extern uint32_t rearranged_twiddle_tab_stride2_arr_4096_q15[6]; +extern uint32_t rearranged_twiddle_tab_stride3_arr_4096_q15[6]; +extern q15_t rearranged_twiddle_stride1_4096_q15[2728]; +extern q15_t rearranged_twiddle_stride2_4096_q15[2728]; +extern q15_t rearranged_twiddle_stride3_4096_q15[2728]; +#endif + + +#endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */ + +#endif /* defined(ARM_MATH_MVEI) */ + + + +#if defined(ARM_MATH_MVEI) + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) + + +#endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */ + +#endif /* defined(ARM_MATH_MVEI) */ + + + +#endif /*_ARM_MVE_TABLES_H*/ + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_vec_math.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_vec_math.h new file mode 100755 index 00000000000..0ce9464bcb0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/DSP/Include/arm_vec_math.h @@ -0,0 +1,372 @@ +/****************************************************************************** + * @file arm_vec_math.h + * @brief Public header file for CMSIS DSP Library + * @version V1.7.0 + * @date 15. October 2019 + ******************************************************************************/ +/* + * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _ARM_VEC_MATH_H +#define _ARM_VEC_MATH_H + +#include "arm_math.h" +#include "arm_common_tables.h" +#include "arm_helium_utils.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +#if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) + +#define INV_NEWTON_INIT_F32 0x7EF127EA + +static const float32_t __logf_rng_f32=0.693147180f; + + +/* fast inverse approximation (3x newton) */ +__STATIC_INLINE f32x4_t vrecip_medprec_f32( + f32x4_t x) +{ + q31x4_t m; + f32x4_t b; + any32x4_t xinv; + f32x4_t ax = vabsq(x); + + xinv.f = ax; + m = 0x3F800000 - (xinv.i & 0x7F800000); + xinv.i = xinv.i + m; + xinv.f = 1.41176471f - 0.47058824f * xinv.f; + xinv.i = xinv.i + m; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + xinv.f = vdupq_m(xinv.f, INFINITY, vcmpeqq(x, 0.0f)); + /* + * restore sign + */ + xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f)); + + return xinv.f; +} + +/* fast inverse approximation (4x newton) */ +__STATIC_INLINE f32x4_t vrecip_hiprec_f32( + f32x4_t x) +{ + q31x4_t m; + f32x4_t b; + any32x4_t xinv; + f32x4_t ax = vabsq(x); + + xinv.f = ax; + + m = 0x3F800000 - (xinv.i & 0x7F800000); + xinv.i = xinv.i + m; + xinv.f = 1.41176471f - 0.47058824f * xinv.f; + xinv.i = xinv.i + m; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + b = 2.0f - xinv.f * ax; + xinv.f = xinv.f * b; + + xinv.f = vdupq_m(xinv.f, INFINITY, vcmpeqq(x, 0.0f)); + /* + * restore sign + */ + xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f)); + + return xinv.f; +} + +__STATIC_INLINE f32x4_t vdiv_f32( + f32x4_t num, f32x4_t den) +{ + return vmulq(num, vrecip_hiprec_f32(den)); +} + +/** + @brief Single-precision taylor dev. 
+ @param[in] x f32 quad vector input + @param[in] coeffs f32 quad vector coeffs + @return destination f32 quad vector + */ + +__STATIC_INLINE f32x4_t vtaylor_polyq_f32( + f32x4_t x, + const float32_t * coeffs) +{ + f32x4_t A = vfmasq(vdupq_n_f32(coeffs[4]), x, coeffs[0]); + f32x4_t B = vfmasq(vdupq_n_f32(coeffs[6]), x, coeffs[2]); + f32x4_t C = vfmasq(vdupq_n_f32(coeffs[5]), x, coeffs[1]); + f32x4_t D = vfmasq(vdupq_n_f32(coeffs[7]), x, coeffs[3]); + f32x4_t x2 = vmulq(x, x); + f32x4_t x4 = vmulq(x2, x2); + f32x4_t res = vfmaq(vfmaq_f32(A, B, x2), vfmaq_f32(C, D, x2), x4); + + return res; +} + +__STATIC_INLINE f32x4_t vmant_exp_f32( + f32x4_t x, + int32x4_t * e) +{ + any32x4_t r; + int32x4_t n; + + r.f = x; + n = r.i >> 23; + n = n - 127; + r.i = r.i - (n << 23); + + *e = n; + return r.f; +} + + +__STATIC_INLINE f32x4_t vlogq_f32(f32x4_t vecIn) +{ + q31x4_t vecExpUnBiased; + f32x4_t vecTmpFlt0, vecTmpFlt1; + f32x4_t vecAcc0, vecAcc1, vecAcc2, vecAcc3; + f32x4_t vecExpUnBiasedFlt; + + /* + * extract exponent + */ + vecTmpFlt1 = vmant_exp_f32(vecIn, &vecExpUnBiased); + + vecTmpFlt0 = vecTmpFlt1 * vecTmpFlt1; + /* + * a = (__logf_lut_f32[4] * r.f) + (__logf_lut_f32[0]); + */ + vecAcc0 = vdupq_n_f32(__logf_lut_f32[0]); + vecAcc0 = vfmaq(vecAcc0, vecTmpFlt1, __logf_lut_f32[4]); + /* + * b = (__logf_lut_f32[6] * r.f) + (__logf_lut_f32[2]); + */ + vecAcc1 = vdupq_n_f32(__logf_lut_f32[2]); + vecAcc1 = vfmaq(vecAcc1, vecTmpFlt1, __logf_lut_f32[6]); + /* + * c = (__logf_lut_f32[5] * r.f) + (__logf_lut_f32[1]); + */ + vecAcc2 = vdupq_n_f32(__logf_lut_f32[1]); + vecAcc2 = vfmaq(vecAcc2, vecTmpFlt1, __logf_lut_f32[5]); + /* + * d = (__logf_lut_f32[7] * r.f) + (__logf_lut_f32[3]); + */ + vecAcc3 = vdupq_n_f32(__logf_lut_f32[3]); + vecAcc3 = vfmaq(vecAcc3, vecTmpFlt1, __logf_lut_f32[7]); + /* + * a = a + b * xx; + */ + vecAcc0 = vfmaq(vecAcc0, vecAcc1, vecTmpFlt0); + /* + * c = c + d * xx; + */ + vecAcc2 = vfmaq(vecAcc2, vecAcc3, vecTmpFlt0); + /* + * xx = xx * xx; + */ + vecTmpFlt0 = vecTmpFlt0 * vecTmpFlt0; + vecExpUnBiasedFlt = vcvtq_f32_s32(vecExpUnBiased); + /* + * r.f = a + c * xx; + */ + vecAcc0 = vfmaq(vecAcc0, vecAcc2, vecTmpFlt0); + /* + * add exponent + * r.f = r.f + ((float32_t) m) * __logf_rng_f32; + */ + vecAcc0 = vfmaq(vecAcc0, vecExpUnBiasedFlt, __logf_rng_f32); + // set log0 down to -inf + vecAcc0 = vdupq_m(vecAcc0, -INFINITY, vcmpeqq(vecIn, 0.0f)); + return vecAcc0; +} + +__STATIC_INLINE f32x4_t vexpq_f32( + f32x4_t x) +{ + // Perform range reduction [-log(2),log(2)] + int32x4_t m = vcvtq_s32_f32(vmulq_n_f32(x, 1.4426950408f)); + f32x4_t val = vfmsq_f32(x, vcvtq_f32_s32(m), vdupq_n_f32(0.6931471805f)); + + // Polynomial Approximation + f32x4_t poly = vtaylor_polyq_f32(val, exp_tab); + + // Reconstruct + poly = (f32x4_t) (vqaddq_s32((q31x4_t) (poly), vqshlq_n_s32(m, 23))); + + poly = vdupq_m(poly, 0.0f, vcmpltq_n_s32(m, -126)); + return poly; +} + +__STATIC_INLINE f32x4_t arm_vec_exponent_f32(f32x4_t x, int32_t nb) +{ + f32x4_t r = x; + nb--; + while (nb > 0) { + r = vmulq(r, x); + nb--; + } + return (r); +} + +__STATIC_INLINE f32x4_t vrecip_f32(f32x4_t vecIn) +{ + f32x4_t vecSx, vecW, vecTmp; + any32x4_t v; + + vecSx = vabsq(vecIn); + + v.f = vecIn; + v.i = vsubq(vdupq_n_s32(INV_NEWTON_INIT_F32), v.i); + + vecW = vmulq(vecSx, v.f); + + // v.f = v.f * (8 + w * (-28 + w * (56 + w * (-70 + w *(56 + w * (-28 + w * (8 - w))))))); + vecTmp = vsubq(vdupq_n_f32(8.0f), vecW); + vecTmp = vfmasq(vecW, vecTmp, -28.0f); + vecTmp = vfmasq(vecW, vecTmp, 56.0f); + vecTmp = vfmasq(vecW, vecTmp, -70.0f); 
+ vecTmp = vfmasq(vecW, vecTmp, 56.0f); + vecTmp = vfmasq(vecW, vecTmp, -28.0f); + vecTmp = vfmasq(vecW, vecTmp, 8.0f); + v.f = vmulq(v.f, vecTmp); + + v.f = vdupq_m(v.f, INFINITY, vcmpeqq(vecIn, 0.0f)); + /* + * restore sign + */ + v.f = vnegq_m(v.f, v.f, vcmpltq(vecIn, 0.0f)); + return v.f; +} + +__STATIC_INLINE f32x4_t vtanhq_f32( + f32x4_t val) +{ + f32x4_t x = + vminnmq_f32(vmaxnmq_f32(val, vdupq_n_f32(-10.f)), vdupq_n_f32(10.0f)); + f32x4_t exp2x = vexpq_f32(vmulq_n_f32(x, 2.f)); + f32x4_t num = vsubq_n_f32(exp2x, 1.f); + f32x4_t den = vaddq_n_f32(exp2x, 1.f); + f32x4_t tanh = vmulq_f32(num, vrecip_f32(den)); + return tanh; +} + +__STATIC_INLINE f32x4_t vpowq_f32( + f32x4_t val, + f32x4_t n) +{ + return vexpq_f32(vmulq_f32(n, vlogq_f32(val))); +} + +#endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)*/ + +#if (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) +#endif /* (defined(ARM_MATH_MVEI) || defined(ARM_MATH_HELIUM)) */ + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_NEON_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "NEMath.h" +/** + * @brief Vectorized integer exponentiation + * @param[in] x value + * @param[in] nb integer exponent >= 1 + * @return x^nb + * + */ +__STATIC_INLINE float32x4_t arm_vec_exponent_f32(float32x4_t x, int32_t nb) +{ + float32x4_t r = x; + nb --; + while(nb > 0) + { + r = vmulq_f32(r , x); + nb--; + } + return(r); +} + + +__STATIC_INLINE float32x4_t __arm_vec_sqrt_f32_neon(float32x4_t x) +{ + float32x4_t x1 = vmaxq_f32(x, vdupq_n_f32(FLT_MIN)); + float32x4_t e = vrsqrteq_f32(x1); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e); + return vmulq_f32(x, e); +} + +__STATIC_INLINE int16x8_t __arm_vec_sqrt_q15_neon(int16x8_t vec) +{ + float32x4_t tempF; + int32x4_t tempHI,tempLO; + + tempLO = vmovl_s16(vget_low_s16(vec)); + tempF = vcvtq_n_f32_s32(tempLO,15); + tempF = __arm_vec_sqrt_f32_neon(tempF); + tempLO = vcvtq_n_s32_f32(tempF,15); + + tempHI = vmovl_s16(vget_high_s16(vec)); + tempF = vcvtq_n_f32_s32(tempHI,15); + tempF = __arm_vec_sqrt_f32_neon(tempF); + tempHI = vcvtq_n_s32_f32(tempF,15); + + return(vcombine_s16(vqmovn_s32(tempLO),vqmovn_s32(tempHI))); +} + +__STATIC_INLINE int32x4_t __arm_vec_sqrt_q31_neon(int32x4_t vec) +{ + float32x4_t temp; + + temp = vcvtq_n_f32_s32(vec,31); + temp = __arm_vec_sqrt_f32_neon(temp); + return(vcvtq_n_s32_f32(temp,31)); +} + +#endif /* (defined(ARM_MATH_NEON) || defined(ARM_MATH_NEON_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) */ + +#ifdef __cplusplus +} +#endif + + +#endif /* _ARM_VEC_MATH_H */ + +/** + * + * End of file. + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nn_tables.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nn_tables.h new file mode 100755 index 00000000000..36be5a8326e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nn_tables.h @@ -0,0 +1,56 @@ +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_tables.h + * Description: Extern declaration for NN tables + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _ARM_NN_TABLES_H +#define _ARM_NN_TABLES_H + +#include "arm_math.h" + +/** +* @brief tables for various activation functions +* +*/ + +extern const q15_t sigmoidTable_q15[256]; +extern const q7_t sigmoidTable_q7[256]; + +extern const q7_t tanhTable_q7[256]; +extern const q15_t tanhTable_q15[256]; + + /** + * @brief 2-way tables for various activation functions + * + * 2-way table, H table for value larger than 1/4 + * L table for value smaller than 1/4, H table for remaining + * We have this only for the q15_t version. It does not make + * sense to have it for q7_t type + */ +extern const q15_t sigmoidHTable_q15[192]; +extern const q15_t sigmoidLTable_q15[128]; + +#endif /* ARM_NN_TABLES_H */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnfunctions.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnfunctions.h new file mode 100755 index 00000000000..9a5a7c2805b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnfunctions.h @@ -0,0 +1,2184 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nnfunctions.h + * Description: Public header file for CMSIS NN Library + * + * $Date: April 6, 2020 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +/** + \mainpage CMSIS NN Software Library + * + * Introduction + * ------------ + * + * This user manual describes the CMSIS NN software library, + * a collection of efficient neural network kernels developed to maximize the + * performance and minimize the memory footprint of neural networks on Cortex-M processor cores. 
+ *
+ * The library is divided into a number of functions each covering a specific category:
+ * - Convolution Functions
+ * - Activation Functions
+ * - Fully-connected Layer Functions
+ * - Pooling Functions
+ * - Softmax Functions
+ * - Basic math Functions
+ *
+ * The library has separate functions for operating on different weight and activation data
+ * types including 8-bit integers (q7_t) and 16-bit integers (q15_t). The description of the
+ * kernels is included in the function description. The implementation details are also
+ * described in this paper [1].
+ *
+ * Function Classification
+ * --------
+ * The functions can be classified into two segments:
+ * - Legacy functions supporting ARM's internal symmetric quantization (8 bits).
+ * - Functions that support the TensorFlow Lite framework with symmetric quantization (8 bits).
+ *
+ * The legacy functions can be identified by their _q7 or _q15 suffix; no new development is done there.
+ * The article in [2] describes in detail how to run a network using the legacy functions.
+ *
+ * The functions supporting the TensorFlow Lite framework are identified by the _s8 suffix and can be
+ * invoked from TFL micro. The functions are bit exact to TensorFlow Lite. Refer to TensorFlow's
+ * documentation in [3] on how to run a TensorFlow Lite model using optimized CMSIS-NN kernels.
+ *
+ * Block Diagram
+ * --------
+ * \image html CMSIS-NN-OVERVIEW.PNG
+ *
+ * Examples
+ * --------
+ *
+ * The library ships with a number of examples which demonstrate how to use the library functions.
+ *
+ * Pre-processor Macros
+ * ------------
+ *
+ * Each library project has different pre-processor macros.
+ *
+ * - ARM_MATH_DSP:
+ *
+ * Define the macro ARM_MATH_DSP if the silicon supports DSP instructions (DSP extension).
+ *
+ * - ARM_MATH_MVEI:
+ *
+ * Define the macro ARM_MATH_MVEI if the silicon supports the M-Profile Vector Extension.
+ *
+ * - ARM_MATH_AUTOVECTORIZE:
+ *
+ * Used in conjunction with ARM_MATH_MVEI to let the compiler auto-vectorize the functions that use
+ * inline assembly. It does not affect functions that use C or intrinsics.
+ *
+ * - ARM_MATH_BIG_ENDIAN:
+ *
+ * Define the macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. This is supported
+ * only for the legacy functions, i.e., functions targeted at TensorFlow Lite do not support big
+ * endianness. By default the library builds for little endian targets.
+ *
+ * - ARM_NN_TRUNCATE:
+ *
+ * Define the macro ARM_NN_TRUNCATE to use floor instead of round-to-the-nearest-int for the computation.
+ *
+ * Upcoming Interface Change
+ * --------
+ * Starting from release 1.4.0, CMSIS-NN will gradually switch to a new API interface to:
+ *
+ * -# have a stable API
+ * -# avoid passing many variables by value
+ * -# improve security
+ * -# improve validation
+ * -# improve code readability
+ *
+ * The upcoming API interface change will be based on "struct" and will only affect the TensorFlow Lite
+ * micro compliant APIs [4] (functions with the _s8 suffix).
+ *
+ * Below you can find a snapshot of how the new API interface will look (names can change),
+ *
+ * i.e. arm_convolve_1x1_s8_fast
+ *
+ * Current API interface | New API interface proposal
+ * ------------- | -------------
+ * const q7_t *input | const cmsis_nn_context &ctx
+ * const uint16_t input_x | const cmsis_nn_conv_params &params
+ * const uint16_t input_y | const cmsis_nn_dims &input_dims
+ * const uint16_t input_ch | const q7_t *input_data
+ * const uint16_t input_batches | const cmsis_nn_dims &filter_dims
+ * const q7_t *kernel | const q7_t *filter_data
+ * const uint16_t output_ch | const cmsis_nn_dims &bias_dims
+ * const uint16_t pad_x | const q31_t *bias_data
+ * const uint16_t pad_y | const cmsis_nn_dims &output_dims
+ * const uint16_t stride_x | q7_t *output_data
+ * const uint16_t stride_y |
+ * const int32_t *bias |
+ * q7_t *output |
+ * const int32_t *output_shift |
+ * const int32_t *output_mult |
+ * const int32_t out_offset |
+ * const int32_t input_offset |
+ * const int32_t out_activation_min |
+ * const int32_t out_activation_max |
+ * const uint16_t output_x |
+ * const uint16_t output_y |
+ * q15_t *buffer_a |
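+ *
+ * Read as a single signature, the right-hand column above corresponds to a
+ * prototype of roughly the following shape for the example function
+ * (illustrative only, assembled from the table; the return type is assumed
+ * unchanged and the final names and ordering may differ):
+ *   arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context &ctx,
+ *                                       const cmsis_nn_conv_params &params,
+ *                                       const cmsis_nn_dims &input_dims,
+ *                                       const q7_t *input_data,
+ *                                       const cmsis_nn_dims &filter_dims,
+ *                                       const q7_t *filter_data,
+ *                                       const cmsis_nn_dims &bias_dims,
+ *                                       const q31_t *bias_data,
+ *                                       const cmsis_nn_dims &output_dims,
+ *                                       q7_t *output_data);
+ * Grouping the parameters into descriptor structs is what allows the
+ * individual prototypes to stay stable, as listed in the goals above.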
+ * + * Copyright Notice + * ------------ + * + * Copyright (C) 2010-2019 Arm Limited. All rights reserved. + * + * [1] CMSIS-NN: Efficient Neural Network Kernels for Arm Cortex-M CPUs https://arxiv.org/abs/1801.06601 + * + * [2] Converting a Neural Network for Arm Cortex-M with CMSIS-NN + * https://developer.arm.com/solutions/machine-learning-on-arm/developer-material/how-to-guides/converting-a-neural-network-for-arm-cortex-m-with-cmsis-nn/single-page + * [3] https://www.tensorflow.org/lite/microcontrollers/library + * + * [4] https://github.com/ARM-software/CMSIS_5/tree/develop/CMSIS/NN#legacy-vs-tfl-micro-compliant-apis + */ + +/** + * @defgroup groupNN Neural Network Functions + * A collection of functions to perform basic operations for neural network layers. Functions with a _s8 suffix support + * TensorFlow Lite framework. + */ + +#ifndef _ARM_NNFUNCTIONS_H +#define _ARM_NNFUNCTIONS_H + +#include "arm_nnsupportfunctions.h" +#include "arm_nn_tables.h" + +#define USE_INTRINSIC + +//#define ARM_NN_TRUNCATE /* This config the rounding model to floor or round to the nearest int */ + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** + * @defgroup NNConv Convolution Functions + * + * Collection of convolution, depthwise convolution functions and their variants. + * + * The convolution is implemented in 2 steps: im2col and GEMM + * + * im2col is a process of converting each patch of image data into + * a column. After im2col, the convolution is computed as matrix-matrix + * multiplication. + * + * To reduce the memory footprint, the im2col is performed partially. + * Each iteration, only a few column (i.e., patches) are generated and + * computed with GEMM kernels similar to CMSIS-DSP arm_mat_mult functions. + * + */ + +/** + * @brief Basic s8 convolution function + * @param[in] input pointer to input tensor. Range: int8, format: [N,H,W,in_ch] + * @param[in] input_x input tensor width + * @param[in] input_y input tensor height + * @param[in] input_ch number of input tensor channels + * @param[in] input_batches number of input batches + * @param[in] kernel pointer to kernel weights. Range: int8, format: [out_ch, H, W, in_ch] + * @param[in] output_ch number of filters, i.e., output tensor channels + * @param[in] kernel_x filter/kernel width + * @param[in] kernel_y filter/kernel height + * @param[in] pad_x padding along width + * @param[in] pad_y padding along height + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to per output channel bias. Range: int32 + * @param[in,out] output pointer to output tensor. format: [H, W, out_ch] + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. Range: int8 + * @param[in] input_offset input tensor offset. Range: int8 + * @param[in] output_activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] output_activation_max Minimum value to clamp the output to. Range: int8 + * @param[in] output_x output tensor width + * @param[in] output_y output tensor height + * @param[in] buffer_a pointer to buffer space used for input optimization(partial im2col) and is necessary + * when ARM_MATH_DSP is defined. + * Required space: (2 * input_ch * kernel_x * kernel_y) * sizeof(q15_t) bytes + * Use arm_convolve_s8_get_buffer_size() to get the size. 
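+ *                           As a worked example (values chosen only for illustration),
+ *                           with input_ch = 16 and a 3x3 kernel the formula above gives
+ *                           2 * 16 * 3 * 3 * sizeof(q15_t) = 576 bytes, which is the size
+ *                           arm_convolve_s8_get_buffer_size(16, 3, 3) is expected to report
+ *                           when ARM_MATH_DSP is defined.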
+ * @return The function returns ARM_MATH_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * 3. Additional memory is required for optimization. Refer to argument 'buffer_a' for details. + * + */ + arm_status arm_convolve_s8(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint16_t input_batches, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t output_x, + const uint16_t output_y, + q15_t *buffer_a); + + /** + * @brief Get the required buffer size for s8 convolution function + * @param[in] input_ch number of input tensor channels + * @param[in] kernel_x filter/kernel width + * @param[in] kernel_y filter/kernel height + * @return The function returns required buffer size + * + */ + int32_t arm_convolve_s8_get_buffer_size(const uint16_t input_ch, + const uint16_t kernel_x, + const uint16_t kernel_y); + + /** + * @brief Basic Q7 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_MATH_SUCCESS + * + */ + arm_status arm_convolve_HWC_q7_basic(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Basic Q7 convolution function (non-square shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * 
@param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_MATH_SUCCESS + */ + arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Basic Q15 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_MATH_SUCCESS + * + */ + arm_status arm_convolve_HWC_q15_basic(const q15_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Fast Q7 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
+ *
+ * This function is the version with the full list of optimization tricks, but with
+ * some constraints:
+ *   ch_im_in is multiple of 4
+ *   ch_im_out is multiple of 2
+ */
+    arm_status arm_convolve_HWC_q7_fast(const q7_t * Im_in,
+                                        const uint16_t dim_im_in,
+                                        const uint16_t ch_im_in,
+                                        const q7_t * wt,
+                                        const uint16_t ch_im_out,
+                                        const uint16_t dim_kernel,
+                                        const uint16_t padding,
+                                        const uint16_t stride,
+                                        const q7_t * bias,
+                                        const uint16_t bias_shift,
+                                        const uint16_t out_shift,
+                                        q7_t * Im_out,
+                                        const uint16_t dim_im_out,
+                                        q15_t * bufferA,
+                                        q7_t * bufferB);
+
+  /**
+   * @brief Fast Q7 convolution function (non-square shape)
+   * @param[in]       Im_in         pointer to input tensor
+   * @param[in]       dim_im_in_x   input tensor dimension x
+   * @param[in]       dim_im_in_y   input tensor dimension y
+   * @param[in]       ch_im_in      number of input tensor channels
+   * @param[in]       wt            pointer to kernel weights
+   * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
+   * @param[in]       dim_kernel_x  filter kernel size x
+   * @param[in]       dim_kernel_y  filter kernel size y
+   * @param[in]       padding_x     padding size x
+   * @param[in]       padding_y     padding size y
+   * @param[in]       stride_x      convolution stride x
+   * @param[in]       stride_y      convolution stride y
+   * @param[in]       bias          pointer to bias
+   * @param[in]       bias_shift    amount of left-shift for bias
+   * @param[in]       out_shift     amount of right-shift for output
+   * @param[in,out]   Im_out        pointer to output tensor
+   * @param[in]       dim_im_out_x  output tensor dimension x
+   * @param[in]       dim_im_out_y  output tensor dimension y
+   * @param[in,out]   bufferA       pointer to buffer space for input
+   * @param[in,out]   bufferB       pointer to buffer space for output
+   * @return     The function returns either
+   * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
+ * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + */ + + arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, + * ARM_MATH_SUCCESS on successful completion. + * + * This function implement convolution with 1x1 kernel size (i.e., dim_kernel_x=1 + * and dim_kernel_y=1). It can be used for + * second half of MobileNets after depthwise separable convolution. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + */ + arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Fast s8 version for 1x1 convolution (non-square shape) + * @param[in] input pointer to input tensor. Format: [N, H, W, in_ch] + * @param[in] input_x input tensor dimension x + * @param[in] input_y input tensor dimension y + * @param[in] input_ch number of input tensor channels + * @param[in] input_batches number of input batches + * @param[in] kernel pointer to kernel weights. 
Format: [out_ch, H, W, in_ch] + * @param[in] output_ch number of filters, i.e., output tensor channels + * @param[in] pad_x padding size x + * @param[in] pad_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to per channel bias. Range : int32 + * @param[in,out] output pointer to output tensor. Format: [H, W, out_ch] + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. Range: int8 + * @param[in] input_offset input tensor offset. Range: -127 to 128 + * @param[in] out_activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] out_activation_max Minimum value to clamp the output to. Range: int8 + * @param[in] output_x output tensor width + * @param[in] output_y output tensor height + * @param[in] buffer_a pointer to buffer space used if required by the implementation + * Use arm_convolve_1x1_s8_fast_get_buffer_size() to get the size + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, + * ARM_MATH_SUCCESS on successful completion. + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# input_ch is a multiple of 4 + * -# padding equals 0 + * -# Stride equals 1 + * -# kernel dimension is 1x1 (Not provided in the argument list) + * + */ + arm_status arm_convolve_1x1_s8_fast(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint16_t input_batches, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t input_offset, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint16_t output_x, + const uint16_t output_y, + q15_t *buffer_a); + + /** + * @brief Get the required buffer size for the fast 1x1 convolution + * (non-square shape) s8 convolution function + * @param[in] input_ch number of input tensor channels + * @return The function returns required buffer size + * + */ + int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const uint16_t input_ch); + + /** + * @brief 1xn convolution + * @param[in] input pointer to input tensor. Format: [N, H, W, in_ch] + * @param[in] input_x input tensor dimension x + * @param[in] input_ch number of input tensor channels + * @param[in] input_batches argument is not used. + * @param[in] kernel pointer to kernel weights. Format: [out_ch, H, W, in_ch] + * @param[in] output_ch number of filters, i.e., output tensor channels + * @param[in] kernel_x kernel width along x + * @param[in] pad_x padding along x + * @param[in] stride_x stride along x + * @param[in] bias pointer to per channel bias. Range : int32 + * @param[out] output pointer to output tensor. Format: [H, W, out_ch] + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. Range: int8 + * @param[in] input_offset input tensor offset. Range: -127 to 128 + * @param[in] out_activation_min Minimum value to clamp the output to. 
Range: int8 + * @param[in] out_activation_max Minimum value to clamp the output to. Range: int8 + * @param[in] output_x output tensor width + * @param[in] buffer_a pointer to buffer space used for input optimization and is necessary + * when ARM_MATH_DSP is defined but not ARM_MATH_MVEI. + * Required space: 2 * input_ch * sizeof(q15_t) bytes + * Use arm_convolve_1_x_n_s8_get_buffer_size() to get the size + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, + * ARM_MATH_SUCCESS on successful completion. + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# input_batches equals 1 + * -# ouput_x is a multiple of 4 + * -# Explicit constraints(since it is for 1xN convolution) + * -## input_y equals 1 + * -## output_y equals 1 + * -## kernel_y equals 1 + *@todo Remove constraint on output_x to make the function generic. + * + */ + arm_status arm_convolve_1_x_n_s8(const q7_t *input, + const uint16_t input_x, + const uint16_t input_ch, + const uint16_t input_batches, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t kernel_x, + const uint16_t pad_x, + const uint16_t stride_x, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t input_offset, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint16_t output_x, + q15_t *buffer_a); + + + /** + * @brief Get the required additional buffer size for 1xn convolution + * + * @param[in] input_ch number of input tensor channels + * @param[in] kernel_x filter/kernel width + * @param[in] kernel_y filter/kernel height + * @return The function returns required buffer size(bytes) + * + */ + int32_t arm_convolve_1_x_n_s8_get_buffer_size(const uint16_t input_ch, + const uint16_t kernel_x, + const uint16_t kernel_y); + + + /** + * @brief Q7 version of convolution for RGB image + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * This kernel is written exclusively for convolution with ch_im_in + * equals 3. This applies on the first layer of CNNs which has input + * image with RGB format. 
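+ *
+ * An illustrative first-layer call (editor's sketch; conv1_wt, conv1_bias and the two shift
+ * values are application-specific placeholders, not part of this header):
+ * @code
+ *   // 32x32 RGB image, 5x5 kernel, 32 filters, padding 2, stride 1 -> 32x32x32 output
+ *   extern const q7_t conv1_wt[32 * 5 * 5 * 3];
+ *   extern const q7_t conv1_bias[32];
+ *   static q7_t  img[32 * 32 * 3];
+ *   static q7_t  conv1_out[32 * 32 * 32];
+ *   static q15_t col_buf[2 * 3 * 5 * 5];   // bufferA scratch for the partial im2col columns
+ *   arm_status st = arm_convolve_HWC_q7_RGB(img, 32, 3, conv1_wt, 32, 5, 2, 1,
+ *                                           conv1_bias, 0, 9, conv1_out, 32,
+ *                                           col_buf, NULL);
+ * @endcode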
+ */ + + arm_status arm_convolve_HWC_q7_RGB(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Fast Q15 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 2 + * ch_im_out is multiple of 2 + */ + + arm_status arm_convolve_HWC_q15_fast(const q15_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Fast Q15 convolution function (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
+ * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * Input dimension constraints: + * + * ch_im_in is multiple of 2 + * + * ch_im_out is multipe of 2 + * + */ + + arm_status + arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q15_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q15_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Q7 depthwise separable convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 2 + * ch_im_out is multiple of 2 + */ + + arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB); + + /** + * @brief Q7 depthwise separable convolution function (non-square shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding sizes x + * @param[in] padding_y padding sizes y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the 
outcome of size checking. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 2 + * ch_im_out is multiple of 2 + */ + arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB); + +/** + * @brief Basic s8 depthwise convolution function + * @param[in] input pointer to input tensor. Range: int8, format: [H,W,in_ch] + * @param[in] input_x input tensor width + * @param[in] input_y input tensor height + * @param[in] input_ch number of input tensor channels + * @param[in] kernel pointer to kernel weights. Range: int8, format: [in_ch, H, W, out_ch] + * @param[in] output_ch Number of output channels. output_ch = ch_mult * input_ch + * @param[in] ch_mult channel multiplier. + * @param[in] kernel_x filter/kernel width + * @param[in] kernel_y filter/kernel height + * @param[in] pad_x padding along width + * @param[in] pad_y padding along height + * @param[in] stride_x convolution stride along width + * @param[in] stride_y convolution stride along height + * @param[in] bias pointer to per output channel bias. Range: int32 + * @param[in,out] output pointer to output tensor. Format: [H, W, out_ch] + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] output_x output tensor width + * @param[in] output_y output tensor height + * @param[in] output_offset offset to elements of output tensor. Range: int8 + * @param[in] input_offset offset to elements of input tensor. Range: -127 to 128 + * @param[in] output_activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] output_activation_max Minimum value to clamp the output to. Range: int8 + * @param[in] dilation_x dilation along x. Not used. Dilation factor of 1 is used. + * @param[in] dilation_y dilation along y. Not used. Dilation factor of 1 is used. + * @param[in] buffer_a Not used. + * + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * 3. Optimization using DSP extension is not available for the generic case where channel multiplier is > 1. 
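+ *
+ * An illustrative call (editor's sketch; dw_wt, dw_bias and the per-channel requantization
+ * arrays dw_mult/dw_shift are placeholders that would come from the quantized model, and the
+ * offsets and clamp range are likewise illustrative):
+ * @code
+ *   // 24x24x8 input, channel multiplier 2 -> 16 output channels, 3x3 kernel, pad 1, stride 1
+ *   extern const q7_t dw_wt[];
+ *   extern const int32_t dw_bias[16], dw_mult[16], dw_shift[16];
+ *   static q7_t dw_in[24 * 24 * 8], dw_out[24 * 24 * 16];
+ *   arm_status st = arm_depthwise_conv_s8(dw_in, 24, 24, 8, dw_wt, 16, 2, 3, 3, 1, 1, 1, 1,
+ *                                         dw_bias, dw_out, dw_shift, dw_mult, 24, 24,
+ *                                         0, 0, -128, 127, 1, 1, NULL);
+ * @endcode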
+ * + */ + + arm_status arm_depthwise_conv_s8(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t ch_mult, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y, + q15_t *buffer_a); + +/** + * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with constraint that in_channel equals out_channel + * @param[in] input pointer to input tensor. Range: int8, format: [H,W,in_ch] + * @param[in] input_x input tensor width + * @param[in] input_y input tensor height + * @param[in] input_ch number of input tensor channels + * @param[in] kernel pointer to kernel weights. Range: int8, Format: [in_ch, H, W, out_ch] + * @param[in] output_ch Number of output channels. + * @param[in] pad_x padding along width + * @param[in] pad_y padding along height + * @param[in] stride_x convolution stride along width + * @param[in] stride_y convolution stride along height + * @param[in] bias pointer to per output channel bias. Range: int8 + * @param[in,out] output pointer to output tensor. Format: [H, W, out_ch] + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] output_x output tensor width + * @param[in] output_y output tensor height + * @param[in] output_offset offset to elements of output tensor + * @param[in] input_offset offset to elements of input tensor + * @param[in] output_activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] output_activation_max Minimum value to clamp the output to. Range: int8 + * @param[in] dilation_x dilation along x. Not used. Dilation factor of 1 is used. + * @param[in] dilation_y dilation along y. Not used. Dilation factor of 1 is used. + * @param[in] buffer_a Buffer for partial im2col optimization. Not used. + * + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors + * ARM_MATH_ARGUMENT_ERROR - Unsupported pad size along the x axis + * ARM_MATH_SUCCESS - Successful operation + * + * @details + * Supported framework: TensorFlow Lite + * + */ + arm_status arm_depthwise_conv_3x3_s8(const int8_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t dilation_x, + const int32_t dilation_y, + int16_t *buffer_a); + +/** + * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel + * @param[in] input pointer to input tensor. 
Range: int8, format: [H,W,in_ch] + * @param[in] input_x input tensor width + * @param[in] input_y input tensor height + * @param[in] input_ch number of input tensor channels + * @param[in] kernel pointer to kernel weights. Range: int8, Format: [in_ch, H, W, out_ch] + * @param[in] output_ch Number of output channels. + * @param[in] kernel_x filter/kernel width + * @param[in] kernel_y filter/kernel height + * @param[in] pad_x padding along width + * @param[in] pad_y padding along height + * @param[in] stride_x convolution stride along width + * @param[in] stride_y convolution stride along height + * @param[in] bias pointer to per output channel bias. Range: int8 + * @param[in,out] output pointer to output tensor. Format: [H, W, out_ch] + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] output_x output tensor width + * @param[in] output_y output tensor height + * @param[in] output_offset offset to elements of output tensor + * @param[in] input_offset offset to elements of input tensor + * @param[in] output_activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] output_activation_max Minimum value to clamp the output to. Range: int8 + * @param[in] dilation_x dilation along x. Not used. Dilation factor of 1 is used. + * @param[in] dilation_y dilation along y. Not used. Dilation factor of 1 is used. + * @param[in] buffer_a Buffer for partial im2col optimization. This is mandatory when + * ARM_MATH_DSP is defined. + * Required space: (2 * input_ch * kernel_x * kernel_y) * sizeof(q15_t) bytes + * Use arm_depthwise_conv_s8_opt_get_buffer_size() to get the size. + * + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors + * ARM_MATH_SUCCESS - Successful operation + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out + * for the following if MVE optimizations(Arm Helium Technology) are used. + * - Output shift + * - Output multiplier + * - Output bias + * - kernel + * + * @details + * 1. Supported framework: TensorFlow Lite + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * 3. Reccomended when number of channels is 4 or greater. + * + */ + arm_status arm_depthwise_conv_s8_opt(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y, + q15_t *buffer_a); + + /** + * @brief Get the required buffer size for optimized s8 depthwise convolution + * function with constraint that in_channel equals out_channel. 
+ * @param[in] input_ch number of input tensor channels + * @param[in] kernel_x filter/kernel width + * @param[in] kernel_y filter/kernel height + * @return The function returns required buffer size + * + */ +int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const uint16_t input_ch, + const uint16_t kernel_x, + const uint16_t kernel_y); + +/** + * @defgroup FC Fully-connected Layer Functions + * + * Collection of fully-connected and matrix multiplication functions. + * + * Fully-connected layer is basically a matrix-vector multiplication + * with bias. The matrix is the weights and the input/output vectors + * are the activation values. Supported {weight, activation} precisions + * include {8-bit, 8-bit}, {16-bit, 16-bit}, and {8-bit, 16-bit}. + * + * Here we have two types of kernel functions. The basic function + * implements the function using regular GEMV approach. The opt functions + * operates with weights in interleaved formats. + * + */ + + /** + * @brief Q7 basic fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + */ + + arm_status arm_fully_connected_q7(const q7_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut, + q15_t * vec_buffer); + + /** + * @brief S8 basic fully-connected and matrix multiplication layer function for TF Lite + * @param[in] pInput pointer to pInput vector + * @param[in] pWeight pointer to matrix weights + * @param[in] col_dim dimension of the input vector + * @param[in] row_dim dimension of the output vector + * @param[in] nb_batches number of batches + * @param[in] input_offset tensor offset for input. Range: -127 to 128 + * @param[in] filter_offset tensor offset for filter. Range: -127 to 128 + * @param[in] out_mult requantization parameter + * @param[in] out_shift requantization parameter + * @param[in] output_offset tensor offset for output. Range: int8 + * @param[in] pBias pointer to bias + * @param[out] pOut pointer to output vector + * @param[in] output_activation_min for clamping + * @param[in] output_activation_max for clamping + * @param[in] vec_buffer pointer to buffer space used for optimization and is necessary + * when ARM_MATH_DSP is defined but not + * ARM_MATH_MVEI. + * Required space: col_dim * sizeof(q15_t) bytes + * Use arm_fully_connected_s8_get_buffer_size() to get the size. + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * vec_buffer size: col_dim of word16. + * + * This basic function is designed to work with regular pWeight + * matrix without interleaving. + * + * 1. Supported framework: TensorFlow Lite + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. 
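+ *
+ * An illustrative single-batch call (editor's sketch; fc_wt, fc_bias and the requantization
+ * pair fc_out_mult/fc_out_shift are placeholders taken from the quantized model):
+ * @code
+ *   // 256-element input vector, 10 output neurons, one batch
+ *   extern const int8_t fc_wt[10 * 256];
+ *   extern const int32_t fc_bias[10];
+ *   const int32_t fc_out_mult = 1, fc_out_shift = 0;   // placeholder requantization parameters
+ *   static int8_t fc_in[256], fc_out[10];
+ *   static q15_t vec_buf[256];   // col_dim * sizeof(q15_t) bytes, see the buffer-size helper below
+ *   arm_status st = arm_fully_connected_s8(fc_in, fc_wt, 256, 10, 1, 0, 0,
+ *                                          fc_out_mult, fc_out_shift, 0,
+ *                                          fc_bias, fc_out, -128, 127, vec_buf);
+ * @endcode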
+ * + */ + + arm_status + arm_fully_connected_s8(const int8_t *pInput, + const int8_t *pWeight, + const uint16_t col_dim, + const uint16_t row_dim, + const uint16_t nb_batches, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t output_offset, + const int32_t *pBias, + int8_t *pOut, + const int32_t output_activation_min, + const int32_t output_activation_max, + q15_t *vec_buffer); + + /** + * @brief Get the required buffer size for S8 basic fully-connected and + * matrix multiplication layer function for TF Lite + * @param[in] col_dim dimension of the input vector + * @return The function returns required buffer size + * + */ + int32_t arm_fully_connected_s8_get_buffer_size(const uint16_t col_dim); + + /** + * @brief Q7 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + */ + + arm_status arm_fully_connected_q7_opt(const q7_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut, + q15_t * vec_buffer); + + /** + * @brief Q15 basic fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + */ + + arm_status arm_fully_connected_q15(const q15_t * pV, + const q15_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t * bias, + q15_t * pOut, + q15_t * vec_buffer); + + /** + * @brief Q15 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + */ + + arm_status arm_fully_connected_q15_opt(const q15_t * pV, + const q15_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t * bias, + q15_t * pOut, + q15_t * vec_buffer); + + /** + * @brief Mixed Q15-Q7 fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for 
bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + */ + + arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q15_t * pOut, + q15_t * vec_buffer); + + /** + * @brief Mixed Q15-Q7 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + */ + + arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q15_t * pOut, + q15_t * vec_buffer); + +/** + * @brief Matrix-Multiplication Kernels for Convolution + * + * These functions are used within convolution layer functions for + * matrix multiplication. + * + * The implementation is similar to CMSIS-DSP arm_mat_mult functions + * with one Q7 and one Q15 operands. The Q15 operand is the im2col + * output which is always with 2 columns. + * + */ + + /** + * @brief Matrix-multiplication function for convolution + * @param[in] pA pointer to operand A + * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors + * @param[in] ch_im_out numRow of A + * @param[in] numCol_A numCol of A + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias the bias + * @param[in,out] pOut pointer to output + * @return The function returns the incremented output pointer + */ + + q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t * pA, + const q15_t * pInBuffer, + const uint16_t ch_im_out, + const uint16_t numCol_A, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut); + /** + * @brief Matrix-multiplication function for convolution with per-channel requantization. + * @param[in] input_a pointer to operand A + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. Range : int8 + * @param[in] activation_max maximum value to clamp the output to. Range : int8 + * @param[in] num_col_a number of columns of A + * @param[in] output_bias per output channel bias. Range : int32 + * @param[in,out] out_0 pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. 
+ * + * @details This function does the matrix multiplication of weight matrix for all output channels + * with 2 columns from im2col and produces two elements/output_channel. The outputs are + * clamped in the range provided by activation min and max. + * Supported framework: TensorFlow Lite micro. + */ + q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, + const q15_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t num_col_a, + const int32_t *const output_bias, + q7_t *out_0); + + /** + * @brief Matrix-multiplication of re-ordered input B with A. + * + * @details For arguments, refer arm_nn_mat_mult_kernel_s8_s16. The re-ordering is a consequence + * of sign extension done by the SXTB16 command on input_b. The outputs are clamped in the range + * provided by activation min and max. + * * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# num_col_a is a multiple of 4 + * -# output_ch is a multiple of 2 + * + */ + q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a, + const q15_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t num_col_a, + const int32_t *const output_bias, + q7_t *out_0); + + /** + * @brief Matrix-multiplication function for convolution with reordered columns + * @param[in] pA pointer to operand A + * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors + * @param[in] ch_im_out numRow of A + * @param[in] numCol_A numCol of A + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias the bias + * @param[in,out] pOut pointer to output + * @return The function returns the incremented output pointer + * + * @details This function assumes that data in pInBuffer are reordered + */ + q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA, + const q15_t * pInBuffer, + const uint16_t ch_im_out, + const uint16_t numCol_A, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut); + +#ifdef __cplusplus +} +#endif + +/* + * Other functions + * These layers are typically not timing critical + * Basic implementation is supported here + */ + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** + * @defgroup BasicMath Basic math functions + * + * Element wise add and multiplication functions. + * + */ + +/** + * @brief s8 element wise add of two vectors + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Range: Range: -127 to 128 + * @param[in] input_1_mult multiplier for input 1 + * @param[in] input_1_shift shift for input 1 + * @param[in] input_2_offset offset for input 2. 
Range: Range: -127 to 128 + * @param[in] input_2_mult multiplier for input 2 + * @param[in] input_2_shift shift for input 2 + * @param[in] left_shift input left shift + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to + * @param[in] out_activation_max maximum value to clamp output to + * @param[in] block_size number of samples + * @return The function returns ARM_MATH_SUCCESS + */ + arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint32_t block_size); + +/** + * @brief s8 element wise multiplication + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Range: Range: -127 to 128 + * @param[in] input_2_offset offset for input 2. Range: Range: -127 to 128 + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to + * @param[in] out_activation_max maximum value to clamp output to + * @param[in] block_size number of samples + * @return The function returns ARM_MATH_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ + arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint32_t block_size); +/** + * @defgroup Acti Activation Functions + * + * Perform activation layers, including ReLU (Rectified Linear Unit), + * sigmoid and tanh + * + */ + + /** + * @brief Q7 RELU function + * @param[in,out] data pointer to input + * @param[in] size number of elements + * @return none. + */ + + void arm_relu_q7(q7_t *data, uint16_t size); + + /** + * @brief s8 ReLU6 function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ + + void arm_relu6_s8(q7_t *data, uint16_t size); + + /** + * @brief Q15 RELU function + * @param[in,out] data pointer to input + * @param[in] size number of elements + * @return none. + */ + + void arm_relu_q15(q15_t *data, uint16_t size); + + /** + * @brief Q7 neural network activation function using direct table look-up + * @param[in,out] data pointer to input + * @param[in] size number of elements + * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 + * @param[in] type type of activation functions + * @return none. 
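+ *
+ * An illustrative in-place call (editor's sketch; ARM_SIGMOID refers to the
+ * arm_nn_activation_type enumerator declared in arm_nnsupportfunctions.h):
+ * @code
+ *   // apply a table-based sigmoid to 64 Q7 activations in place, integer part width 1
+ *   static q7_t act[64];
+ *   arm_nn_activations_direct_q7(act, 64, 1, ARM_SIGMOID);
+ * @endcode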
+ */ + + void arm_nn_activations_direct_q7(q7_t * data, uint16_t size, uint16_t int_width, + arm_nn_activation_type type); + + /** + * @brief Q15 neural network activation function using direct table look-up + * @param[in,out] data pointer to input + * @param[in] size number of elements + * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 + * @param[in] type type of activation functions + * @return none. + * + * @details + * + * This is the direct table look-up approach. + * + * Assume here the integer part of the fixed-point is <= 3. + * More than 3 just not making much sense, makes no difference with + * saturation followed by any of these activation functions. + */ + + void arm_nn_activations_direct_q15(q15_t * data, uint16_t size, uint16_t int_width, + arm_nn_activation_type type); + +/** + * @defgroup Pooling Pooling Functions + * + * Perform pooling functions, including max pooling and average pooling + * + */ + + /** + * @brief Q7 max pooling function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] Im_out pointer to output tensor + * @return none. + * + */ + + void arm_maxpool_q7_HWC(q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const uint16_t dim_im_out, + q7_t * bufferA, + q7_t * Im_out); + + /** + * @brief Q7 average pooling function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] Im_out pointer to output tensor + * @return none. + * + */ + + void arm_avepool_q7_HWC(q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const uint16_t dim_im_out, + q7_t * bufferA, + q7_t * Im_out); + + /** + * @brief s8 average pooling function + * @param[in] dim_src_height input tensor dimension + * @param[in] dim_src_width input tensor dimension + * @param[in] dim_dst_height output tensor dimension + * @param[in] dim_dst_width output tensor dimension + * @param[in] stride_height stride along y + * @param[in] stride_width stride along x + * @param[in] dim_kernel_height filter kernel size along y + * @param[in] dim_kernel_width filter kernel size along x + * @param[in] padding_height padding size along y + * @param[in] padding_width padding size along x + * @param[in] act_min Min clamping + * @param[in] act_max Max clamping + * @param[in] ch_src number of input tensor channels + * @param[in,out] src pointer to input tensor + * @param[in] bufferA temporary buffer used for optimization and is necessary when + * ARM_MATH_DSP is defined. 
+ * Required space: (ch_src * dim_dst_width) * sizeof(q15_t) bytes + * Use arm_avgpool_s8_get_buffer_size() to get the size + * @param[in,out] dst pointer to output tensor + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors + * ARM_MATH_SUCCESS - Successful operation + * ARM_MATH_ARGUMENT_ERROR - Implementation not available + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ + + arm_status arm_avgpool_s8(const int dim_src_height, + const int dim_src_width, + const int dim_dst_height, + const int dim_dst_width, + const int stride_height, + const int stride_width, + const int dim_kernel_height, + const int dim_kernel_width, + const int padding_height, + const int padding_width, + const int act_min, + const int act_max, + const int ch_src, + int8_t *src, + int16_t *bufferA, + int8_t *dst); + + /** + * @brief Get the required buffer size for S8 average pooling function + * @param[in] dim_dst_width output tensor dimension + * @param[in] ch_src number of input tensor channels + * @return The function returns required buffer size + * + */ + int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, + const int ch_src); + + /** + * @brief s8 DSP optimized max pooling function + * @param[in] input_y input tensor dimension along y + * @param[in] input_x input tensor dimension along x + * @param[in] output_y output tensor dimension along y + * @param[in] output_x output tensor dimension along x + * @param[in] stride_y stride along y + * @param[in] stride_x stride along x + * @param[in] kernel_y filter kernel size along y + * @param[in] kernel_x filter kernel size along x + * @param[in] pad_y padding size along y + * @param[in] pad_x padding size along x + * @param[in] act_min Activation min. Lower limit to clamp output to. Range: int8 + * @param[in] act_max Activation max. Upper limit to clamp output to. Range: int8 + * @param[in] depth number of input channels + * @param[in] input pointer to input tensor + * @param[in] tmp_buffer Not used. + * @param[in,out] output pointer to output tensor + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors + * ARM_MATH_SUCCESS - Successful operation + * ARM_MATH_ARGUMENT_ERROR - Implementation not available + * @note The input data is corrupted by this function. + * @details This optimized implementation is recommended when depth is >= 4 and dimensions are large. + * + */ + + arm_status arm_max_pool_s8_opt(const uint16_t input_y, + const uint16_t input_x, + const uint16_t output_y, + const uint16_t output_x, + const uint16_t stride_y, + const uint16_t stride_x, + const uint16_t kernel_y, + const uint16_t kernel_x, + const uint16_t pad_y, + const uint16_t pad_x, + const int8_t act_min, + const int8_t act_max, + const uint16_t depth, + int8_t *input, + int16_t *tmp_buffer, + int8_t *output); + + /** + * @brief s8 pure C max pooling function + * @param[in] input_y input tensor dimension along y + * @param[in] input_x input tensor dimension along x + * @param[in] output_y output tensor dimension along y + * @param[in] output_x output tensor dimension along x + * @param[in] stride_y stride along y + * @param[in] stride_x stride along x + * @param[in] kernel_y filter kernel size along y + * @param[in] kernel_x filter kernel size along x + * @param[in] pad_y padding size along y + * @param[in] pad_x padding size along x + * @param[in] act_min Activation min. Lower limit to clamp output to. 
Range: int8 + * @param[in] act_max Activation max. Upper limit to clamp output to. Range: int8 + * @param[in] channel_in number of input channels + * @param[in] input pointer to input tensor + * @param[in] tmp_buffer Not used. + * @param[in,out] output pointer to output tensor + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors + * ARM_MATH_SUCCESS - Successful operation + * ARM_MATH_ARGUMENT_ERROR - Implementation not available + * + * @details + * - This basic implementation is recommended when number of channels is less than 4 and/or + * dimensions are small. + * + */ + arm_status arm_max_pool_s8(const uint16_t input_y, + const uint16_t input_x, + const uint16_t output_y, + const uint16_t output_x, + const uint16_t stride_y, + const uint16_t stride_x, + const uint16_t kernel_y, + const uint16_t kernel_x, + const uint16_t pad_y, + const uint16_t pad_x, + const int8_t act_min, + const int8_t act_max, + const uint16_t channel_in, + int8_t *input, + int16_t *tmp_buffer, + int8_t *output); +/** + * @defgroup Softmax Softmax Functions + * + * EXP(2) based softmax functions. + * + */ + + /** + * @brief Q7 softmax function + * @param[in] vec_in pointer to input vector + * @param[in] dim_vec input vector dimension + * @param[out] p_out pointer to output vector + * + * @note This function is an optimized version which is not bit-accurate with + * TensorFlow Lite's kernel + * + */ + +void arm_softmax_q7(const q7_t * vec_in, const uint16_t dim_vec, q7_t * p_out); + + /** + * @brief Q7 softmax function with batch parameter + * @param[in] vec_in pointer to input vector + * @param[in] nb_batches number of batches + * @param[in] dim_vec input vector dimension + * @param[out] p_out pointer to output vector + * @return none. + * + * @note This function is an optimized version which is not bit-accurate with + * TensorFlow Lite's kernel + * + */ + +void arm_softmax_with_batch_q7(const q7_t * vec_in, const uint16_t nb_batches,const uint16_t dim_vec, q7_t * p_out ); + /** + * @brief Q15 softmax function + * @param[in] vec_in pointer to input vector + * @param[in] dim_vec input vector dimension + * @param[out] p_out pointer to output vector + * @return none. + * + * @note This function is an optimized version which is not bit-accurate with + * TensorFlow Lite's kernel + * + */ + +void arm_softmax_q15(const q15_t * vec_in, const uint16_t dim_vec, q15_t * p_out); + + /** + * @brief S8 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. 
Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ + +void arm_softmax_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output); + + /** + * @brief U8 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ + +void arm_softmax_u8(const uint8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + uint8_t *output); + + /** + * @brief uint8 depthwise convolution function with asymmetric quantization for even number of channel multiplier + * and input channels. Unless specified otherwise, arguments are mandatory. + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_ch Channels in input tensor + * @param[in] kernel Pointer to kernel weights + * @param[in] kernel_x Width of kernel + * @param[in] kernel_y Height of kernel + * @param[in] ch_mult Number of channel multiplier + * @param[in] pad_x Padding sizes x + * @param[in] pad_y Padding sizes y + * @param[in] stride_x Convolution stride along the width + * @param[in] stride_y Convolution stride along the height + * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. + * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. + * @param[in] bias Pointer to optional bias values. If no bias is + * availble, NULL is expected + * @param[in] input_offset Input tensor zero offset + * @param[in] filter_offset Kernel tensor zero offset + * @param[in] output_offset Output tensor zero offset + * @param[in,out] output Pointer to output tensor + * @param[in] output_x Width of output tensor + * @param[in] output_y Height of output tensor + * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255} + * @param[in] output_activation_max Minimum value to clamp the output to. 
Range : {0, 255} + * @param[in] out_shift Amount of right-shift for output + * @param[in] out_mult Output multiplier for requantization + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors + * ARM_MATH_SUCCESS - Successful operation + * ARM_MATH_ARGUMENT_ERROR - Implementation not available + * + * Input constraints + * ch_mult is multiple of 2 + * kernel_x is multiple of 2 + * + */ + arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint8_t *kernel, + const uint16_t kernel_x, + const uint16_t kernel_y, + const int16_t ch_mult, + const int16_t pad_x, + const int16_t pad_y, + const int16_t stride_x, + const int16_t stride_y, + const int16_t dilation_x, + const int16_t dilation_y, + const int32_t *bias, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t output_offset, + uint8_t *output, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t out_shift, + const int32_t out_mult); + +/** + * @defgroup Reshape Reshape Functions + * + */ + + /** + * @brief Reshape a s8 vector into another with different shape + * @param[in] input points to the s8 input vector + * @param[out] output points to the s8 output vector + * @param[in] total_size total size of the input and output vectors in bytes + * + * @note The output is expected to be in a memory area that does not overlap with the input's + * + */ + void arm_reshape_s8(const int8_t *input, + int8_t *output, + const uint32_t total_size); + +/** + * @defgroup Concatenation Concatenation Functions + * + */ + + /** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis + * This function should be called for each input tensor to concatenate. The argument offset_x + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_x = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x) + * offset_x += input_x[i] + * } + * + * This function assumes that the output tensor has: + * -# The same height of the input tensor + * -# The same number of channels of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. 
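A concrete version of the X-axis loop sketched above might look like the following minimal sketch (the two-tensor setup, widths 4 and 6, height 2, 3 channels and batch 1 are illustrative assumptions; the full parameter list follows the declaration of arm_concatenation_s8_x below):

#include <stdint.h>
#include "arm_nnfunctions.h"

/* Concatenate two int8 HWC tensors along X into one output of width 10. */
void concat_x_example(const int8_t *in0, const int8_t *in1, int8_t *out)
{
    uint32_t offset_x = 0;
    arm_concatenation_s8_x(in0, 4, 2, 3, 1, out, 10, offset_x);  /* first slice  */
    offset_x += 4;
    arm_concatenation_s8_x(in1, 6, 2, 3, 1, out, 10, offset_x);  /* second slice */
}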
+ * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor + * @param[in] output_x Width of output tensor + * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_x is less than output_x + * + */ + void arm_concatenation_s8_x(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_x, + const uint32_t offset_x); + + /** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis + * This function should be called for each input tensor to concatenate. The argument offset_y + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_y = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y) + * offset_y += input_y[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same number of channels of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor + * @param[in] output_y Height of output tensor + * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_y is less than output_y + * + */ + void arm_concatenation_s8_y(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_y, + const uint32_t offset_y); + + /** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis + * This function should be called for each input tensor to concatenate. The argument offset_z + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_z = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z) + * offset_z += input_z[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same height of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. 
+ * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor + * @param[in] output_z Channels in output tensor + * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_z is less than output_z + * + */ + void arm_concatenation_s8_z(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_z, + const uint32_t offset_z); + + /** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size) + * This function should be called for each input tensor to concatenate. The argument offset_w + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_w = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w) + * offset_w += input_w[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same height of the input tensor + * -# The same number o channels of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor + * @param[in] offset_w The offset on the W axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + */ + void arm_concatenation_s8_w(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint32_t offset_w); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnsupportfunctions.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnsupportfunctions.h new file mode 100755 index 00000000000..568ab42fa23 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/CMSIS/NN/Include/arm_nnsupportfunctions.h @@ -0,0 +1,905 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nnsupportfunctions.h + * Description: Public header file of support functions for CMSIS NN Library + * + * $Date: March 17, 2020 + * $Revision: V.4.0.3 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#ifndef _ARM_NNSUPPORTFUNCTIONS_H_ +#define _ARM_NNSUPPORTFUNCTIONS_H_ + +#include "arm_math.h" +#include "arm_common_tables.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define LEFT_SHIFT(_shift) (_shift > 0 ? _shift : 0) +#define RIGHT_SHIFT(_shift) (_shift > 0 ? 0 : -_shift) +#define MASK_IF_ZERO(x) (x) == 0 ? ~0 : 0 +#define MASK_IF_NON_ZERO(x) (x) != 0 ? ~0 : 0 +#define SELECT_USING_MASK(mask, a, b) ((mask) & (a)) ^ (~(mask) & (b)) + +#define MAX(A,B) ((A) > (B) ? (A) : (B)) +#define MIN(A,B) ((A) < (B) ? (A) : (B)) +#define CLAMP(x, h, l) MAX(MIN((x), (h)), (l)) + +/** + * @brief Union for SIMD access of q31/q15/q7 types + */ +union arm_nnword +{ + q31_t word; + /**< q31 type */ + q15_t half_words[2]; + /**< q15 type */ + q7_t bytes[4]; + /**< q7 type */ +}; + +/** + * @brief Struct for specifying activation function types + * + */ +typedef enum +{ + ARM_SIGMOID = 0, + /**< Sigmoid activation function */ + ARM_TANH = 1, + /**< Tanh activation function */ +} arm_nn_activation_type; + +/** + * @defgroup nndata_convert Neural Network Data Conversion Functions + * + * Perform data type conversion in-between neural network operations + * + */ + +/** + * @brief Converts the elements of the q7 vector to q15 vector without left-shift + * @param[in] *pSrc points to the q7 input vector + * @param[out] *pDst points to the q15 output vector + * @param[in] blockSize length of the input vector + * + */ +void arm_q7_to_q15_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize); + +/** + * @brief Non-saturating addition of elements of a q7 vector + * @param[in] *input Pointer to the q7 input vector + * @param[out] *output Pointer to the q31 output variable. + * @param[in] block_size length of the input vector + * \par Description: + * + * 2^24 samples can be added without saturating the result. + * + * The equation used for the conversion process is: + * + *
+ *  sum = input[0] + input[1] + ... + input[block_size - 1]
+ *
+ * + * */ +void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size); + +/** + * @brief Converts the elements of the q7 vector to reordered q15 vector without left-shift + * @param[in] *pSrc points to the q7 input vector + * @param[out] *pDst points to the q15 output vector + * @param[in] blockSize length of the input vector + * @return none. + * + */ +void arm_q7_to_q15_reordered_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize); + +/** + * @brief Converts the elements from a q7 vector to a q15 vector with an added offset + * @param[in] src pointer to the q7 input vector + * @param[out] dst pointer to the q15 output vector + * @param[in] block_size length of the input vector + * @param[in] offset q7 offset to be added to each input vector element. + * + * \par Description: + * + * The equation used for the conversion process is: + * + *
+ *  dst[n] = (q15_t) src[n] + offset;   0 <= n < block_size.
+ *
+ * + */ +void arm_q7_to_q15_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset); + +/** + * @brief Converts the elements of the q7 vector to reordered q15 vector with an added offset + * @param[in] src pointer to the q7 input vector + * @param[out] dst pointer to the q15 output vector + * @param[in] block_size length of the input vector + * @param[in] offset offset to be added to each input vector element. + * @return none. + * + * @details This function does the q7 to q15 expansion with re-ordering of bytes. Re-ordering is a consequence of + * the sign extension intrinsic(DSP extension). The tail (i.e., last (N % 4) elements) retains its original + * order. + * + */ +void arm_q7_to_q15_reordered_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset); + +/** + * @brief Converts the elements from a q7 vector and accumulate to a q15 vector + * @param[in] *src points to the q7 input vector + * @param[out] *dst points to the q15 output vector + * @param[in] block_size length of the input vector + * + * \par Description: + * + * The equation used for the conversion process is: + * + *
+ *  dst[n] += (q15_t) src[n];   0 <= n < block_size.
+ *
+ * + */ +void arm_nn_accumulate_q7_to_q15(q15_t *dst, const q7_t *src, uint32_t block_size); + +/** + * @brief Depthwise conv on an im2col buffer where the input channel equals output channel. + * @param[in] row pointer to row + * @param[in] col pointer to im2col buffer, always consists of 2 columns. + * @param[in] num_ch number of channels + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. Range : int8 + * @param[in] activation_max maximum value to clamp the output to. Range : int8 + * @param[in] kernel_size number of elements in one column. + * @param[in] output_bias per output channel bias. Range : int32 + * @param[out] out pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details Supported framework: TensorFlow Lite micro. + */ +q7_t *arm_nn_depthwise_conv_s8_core(const q7_t *row, + const q15_t *col, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t kernel_size, + const int32_t *const output_bias, + q7_t *out); + +/** + * @brief General Matrix-multiplication function with per-channel requantization. + * @param[in] input_row pointer to row operand + * @param[in] input_col pointer to col operand + * @param[in] output_ch number of rows of input_row + * @param[in] col_batches number of column batches. Range: 1 to 4 + * @param[in] output_shift pointer to per output channel requantization shift parameter. + * @param[in] output_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] col_offset input tensor(col) offset. + * @param[in] row_offset kernel offset(row). Not used. + * @param[in] out_activation_min minimum value to clamp the output to. Range : int8 + * @param[in] out_activation_max maximum value to clamp the output to. Range : int8 + * @param[in] row_len number of elements in each row + * @param[in] bias per output channel bias. Range : int32 + * @param[in,out] out pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details Supported framework: TensorFlow Lite +*/ +q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, + const q7_t *input_col, + const uint16_t output_ch, + const uint16_t col_batches, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t col_offset, + const int32_t row_offset, + const int16_t out_activation_min, + const int16_t out_activation_max, + const uint16_t row_len, + const int32_t *const bias, + q7_t *out); + +/** + * @brief General Matrix-multiplication without requantization for one row & one column + * @param[in] row_elements number of row elements + * @param[in] row_base pointer to row operand + * @param[in] col_base pointer to col operand + * @param[out] sum_col pointer to store sum of column elements + * @param[out] output pointer to store result of multiply-accumulate + * @return The function returns the multiply-accumulated result of the row by column. 
+ * + * @details Pseudo-code + * *output = 0 + * sum_col = 0 + * for (i = 0; i < row_elements; i++) + * *output += row_base[i] * col_base[i] + * sum_col += col_base[i] + * +*/ +arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, + const int8_t *row_base, + const int8_t *col_base, + int32_t *const sum_col, + int32_t *const output); + +/** + * @brief General Matrix-multiplication without requantization for four rows and one column + * @param[in] row_elements number of row elements + * @param[in] offset offset between rows. Can be the same as row_elements. + * For e.g, in a 1x1 conv scenario with stride as 1. + * @param[in] row_base pointer to row operand + * @param[in] col_base pointer to col operand + * @param[out] sum_col pointer to store sum of column elements + * @param[out] output pointer to store result(4 int32's) of multiply-accumulate + * @return The function returns the multiply-accumulated result of the row by column + * + * @details Pseudo-code + * output[0] = 0 + * .. + * output[3] = 0 + * sum_col = 0 + * for (i = 0; i < row_elements; i++) + * output[0] += row_base[i] * col_base[i] + * .. + * output[3] += row_base[i + (row_elements * 3)] * col_base[i] + * sum_col += col_base[i] +*/ +arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, + const int32_t offset, + const int8_t *row_base, + const int8_t *col_base, + int32_t *const sum_col, + int32_t *const output); + +/** +* @brief General Matrix-multiplication function with per-channel requantization. +* This function assumes: +* - LHS input matrix NOT transposed (nt) +* - RHS input matrix transposed (t) +* +* @note This operation also performs the broadcast bias addition before the requantization +* +* @param[in] lhs Pointer to the LHS input matrix +* @param[in] rhs Pointer to the RHS input matrix +* @param[in] bias Pointer to the bias vector. The length of this vector is equal to the number of output columns (or RHS input rows) +* @param[out] dst Pointer to the output matrix with "m" rows and "n" columns +* @param[in] dst_multipliers Pointer to the multipliers vector needed for the per-channel requantization. The length of this vector is equal to +* the number of output columns (or RHS input rows) +* @param[in] dst_shifts Pointer to the shifts vector needed for the per-channel requantization. The length of this vector is equal to +* the number of output columns (or RHS input rows) +* @param[in] lhs_rows Number of LHS input rows +* @param[in] rhs_rows Number of RHS input rows +* @param[in] rhs_cols Number of LHS/RHS input columns +* @param[in] lhs_offset Offset to be applied to the LHS input value +* @param[in] dst_offset Offset to be applied the output result +* @param[in] activation_min Minimum value to clamp down the output. Range : int8 +* @param[in] activation_max Maximum value to clamp up the output. 
Range : int8 +* +* @return The function returns ARM_MATH_SUCCESS +* +*/ +arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief s8 Vector by Matrix (transposed) multiplication + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. Range: -127 to 128 + * @param[in] rhs_offset Offset to be added to the input values of the right-hand side matrix. Range: -127 to 128 + * @param[in] dst_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * + * @return The function returns ARM_MATH_SUCCESS + * + */ +arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in padded cases where + * the padding is -lhs_offset(Range: int8). Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 + * @param[in] num_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels + * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels + * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementaiton is available + * - NULL if no implementation is available. + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out + * for the following. 
+ * - Output shift + * - Output multiplier + * - Output bias + * - rhs + */ +q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t lhs_offset, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. + * Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 + * @param[in] num_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. + * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels. + * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementaiton is available + * - NULL if no implementation is available. + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out + * for the following. + * - Output shift + * - Output multiplier + * - Output bias + * - rhs + */ +q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t lhs_offset, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out); + +/** + @brief Read 2 q15 elements and post increment pointer. + @param[in] in_q15 Pointer to pointer that holds address of input. + @return q31 value + */ +__STATIC_FORCEINLINE q31_t arm_nn_read_q15x2_ia(const q15_t **in_q15) +{ + q31_t val; + + memcpy(&val, *in_q15, 4); + *in_q15 += 2; + + return (val); +} + +/** + @brief Read 4 q7 from q7 pointer and post increment pointer. + @param[in] in_q7 Pointer to pointer that holds address of input. + @return q31 value + */ +__STATIC_FORCEINLINE q31_t arm_nn_read_q7x4_ia(const q7_t **in_q7) +{ + q31_t val; + memcpy(&val, *in_q7, 4); + *in_q7 += 4; + + return (val); +} + +/** + @brief Read 2 q15 from q15 pointer. + @param[in] in_q15 pointer to address of input. + @return q31 value + */ +__STATIC_FORCEINLINE q31_t arm_nn_read_q15x2(const q15_t *in_q15) +{ + q31_t val; + memcpy(&val, in_q15, 4); + + return (val); +} + +/** + @brief Read 4 q7 values. + @param[in] in_q7 pointer to address of input. + @return q31 value + */ +__STATIC_FORCEINLINE q31_t arm_nn_read_q7x4(const q7_t *in_q7) +{ + q31_t val; + memcpy(&val, in_q7, 4); + + return (val); +} + +/** + * @brief memset optimized for MVE + * @param[in, out] dst Destination pointer + * @param[in] val Value to set + * @param[in] block_size Number of bytes to copy. 
+ * + */ +__STATIC_FORCEINLINE void arm_memset_q7(q7_t *dst, + const q7_t val, + uint32_t block_size) +{ +#if defined(ARM_MATH_MVEI) + __asm volatile ( + " vdup.8 q0, %[set_val] \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vstrb.8 q0, [%[in]], 16 \n" + " letp lr, 2b \n" + "1: \n" + :[in] "+r"(dst) + :[cnt] "r"(block_size), [set_val] "r"(val) + :"q0", "memory", "r14"); +#else + memset(dst, val, block_size); +#endif +} + +#if defined (ARM_MATH_DSP) + +/** + * @brief read and expand one q7 word into two q15 words + */ + +__STATIC_FORCEINLINE const q7_t *read_and_pad(const q7_t *source, q31_t * out1, q31_t * out2) +{ + q31_t inA = arm_nn_read_q7x4_ia(&source); + q31_t inAbuf1 = __SXTB16(__ROR(inA, 8)); + q31_t inAbuf2 = __SXTB16(inA); + +#ifndef ARM_MATH_BIG_ENDIAN + *out2 = __PKHTB(inAbuf1, inAbuf2, 16); + *out1 = __PKHBT(inAbuf2, inAbuf1, 16); +#else + *out1 = __PKHTB(inAbuf1, inAbuf2, 16); + *out2 = __PKHBT(inAbuf2, inAbuf1, 16); +#endif + + return source; +} + +/** + * @brief read and expand one q7 word into two q15 words with reordering + */ + +__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered(const q7_t *source, q31_t * out1, q31_t * out2) +{ + q31_t inA = arm_nn_read_q7x4_ia(&source); +#ifndef ARM_MATH_BIG_ENDIAN + *out2 = __SXTB16(__ROR(inA, 8)); + *out1 = __SXTB16(inA); +#else + *out1 = __SXTB16(__ROR(inA, 8)); + *out2 = __SXTB16(inA); +#endif + + return source; +} + +/** + * @brief read and expand one q7 word into two q15 words with reordering and add an offset + */ +__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered_with_offset(const q7_t *source, q31_t * out1, q31_t * out2, q31_t offset) +{ + q31_t inA = arm_nn_read_q7x4_ia(&source); + +#ifndef ARM_MATH_BIG_ENDIAN + *out2 = __SXTB16(__ROR(inA, 8)); + *out1 = __SXTB16(inA); +#else + *out1 = __SXTB16(__ROR(inA, 8)); + *out2 = __SXTB16(inA); +#endif + *out1 = __QADD16(*out1,offset); + *out2 = __QADD16(*out2,offset); + + return source; +} + +#endif + + + +/** + * @defgroup NNBasicMath Basic Math Functions for Neural Network Computation + * + * Basic Math Functions for Neural Network Computation + * + */ + +/** + * @brief q7 vector multiplication with variable output shifts + * @param[in] *pSrcA pointer to the first input vector + * @param[in] *pSrcB pointer to the second input vector + * @param[out] *pDst pointer to the output vector + * @param[in] out_shift amount of right-shift for output + * @param[in] blockSize number of samples in each vector + * @return none. + * + * Scaling and Overflow Behavior: + * \par + * The function uses saturating arithmetic. + * Results outside of the allowable q15 range [0x8000 0x7FFF] will be saturated. + */ + +void arm_nn_mult_q15( + q15_t * pSrcA, + q15_t * pSrcB, + q15_t * pDst, + const uint16_t out_shift, + uint32_t blockSize); + +/** + * @brief q7 vector multiplication with variable output shifts + * @param[in] *pSrcA pointer to the first input vector + * @param[in] *pSrcB pointer to the second input vector + * @param[out] *pDst pointer to the output vector + * @param[in] out_shift amount of right-shift for output + * @param[in] blockSize number of samples in each vector + * @return none. + * + * Scaling and Overflow Behavior: + * \par + * The function uses saturating arithmetic. + * Results outside of the allowable q7 range [0x80 0x7F] will be saturated. 
+ */ + +void arm_nn_mult_q7( + q7_t * pSrcA, + q7_t * pSrcB, + q7_t * pDst, + const uint16_t out_shift, + uint32_t blockSize); + +/** + * @brief macro for adding rounding offset + */ +#ifndef ARM_NN_TRUNCATE + #define NN_ROUND(out_shift) ( (0x1u << out_shift) >> 1 ) +#else + #define NN_ROUND(out_shift) 0 +#endif + +// Macros for shortening quantization functions' names and avoid long lines +#define MUL_SAT(a, b) arm_nn_sat_doubling_high_mult((a), (b)) +#define MUL_SAT_MVE(a, b) arm_sat_doubling_high_mult_mve_32x4((a), (b)) +#define MUL_POW2(a, b) arm_nn_mult_by_power_of_two((a), (b)) + + +#define DIV_POW2(a, b) arm_nn_divide_by_power_of_two((a), (b)) +#define DIV_POW2_MVE(a, b) arm_divide_by_power_of_two_mve((a), (b)) + + +#define EXP_ON_NEG(x) arm_nn_exp_on_negative_values((x)) +#define ONE_OVER1(x) arm_nn_one_over_one_plus_x_for_x_in_0_1((x)) + +/** + * @brief Saturating doubling high multiply. Result matches + * NEON instruction VQRDMULH. + * @param[in] m1 Multiplicand + * @param[in] m2 Multiplier + * @return Result of multiplication. + * + */ +__STATIC_FORCEINLINE q31_t arm_nn_sat_doubling_high_mult(const q31_t m1, const q31_t m2) +{ + q31_t result = 0; + // Rounding offset to add for a right shift of 31 + q63_t mult = 1 << 30; + + if ((m1 < 0) ^ (m2 < 0)) + { + mult = 1 - mult; + } + // Gets resolved as a SMLAL instruction + mult = mult + (q63_t)m1 * m2; + + // Utilize all of the upper 32 bits. This is the doubling step + // as well. + result = mult / (1UL << 31); + + if ((m1 == m2) && (m1 == (int32_t)Q31_MIN)) + { + result = Q31_MAX; + } + return result; +} + +/** + * @brief Rounding divide by power of two. + * @param[in] dividend - Dividend + * @param[in] exponent - Divisor = power(2, exponent) + * Range: [0, 31] + * @return Rounded result of division. Midpoint is rounded away from zero. + * + */ +__STATIC_FORCEINLINE q31_t arm_nn_divide_by_power_of_two(const q31_t dividend, const q31_t exponent) +{ + q31_t result = 0; + const q31_t remainder_mask = (1l << exponent) - 1; + int32_t remainder = remainder_mask & dividend; + + // Basic division + result = dividend >> exponent; + + // Adjust 'result' for rounding (mid point away from zero) + q31_t threshold = remainder_mask >> 1; + if (result < 0) + { + threshold++; + } + if (remainder > threshold) + { + result++; + } + + return result; +} + +/** + * @brief Requantize a given value. + * @param[in] val Value to be requantized + * @param[in] multiplier multiplier + * @param[in] shift left or right shift for 'val * multiplier' + * + * @return Returns (val * multiplier)/(2 ^ shift) + * + */ +__STATIC_FORCEINLINE q31_t arm_nn_requantize(const q31_t val, const q31_t multiplier, const q31_t shift) +{ + return arm_nn_divide_by_power_of_two(arm_nn_sat_doubling_high_mult(val * (1 << LEFT_SHIFT(shift)), multiplier), + RIGHT_SHIFT(shift)); +} + +/** + * @brief memcpy optimized for MVE + * @param[in, out] dst Destination pointer + * @param[in] src Source pointer. + * @param[in] block_size Number of bytes to copy. 
+ * + */ +__STATIC_FORCEINLINE void arm_memcpy_q7(q7_t *__RESTRICT dst, + const q7_t *__RESTRICT src, + uint32_t block_size) +{ +#if defined(ARM_MATH_MVEI) + __asm volatile ( + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vldrb.8 q0, [%[in]], 16 \n" + " vstrb.8 q0, [%[out]], 16 \n" + " letp lr, 2b \n" + "1: \n" + :[in] "+r"(src) + ,[out] "+r"(dst) + :[cnt] "r"(block_size) + :"q0", "memory", "r14"); +#else + memcpy(dst, src, block_size); +#endif +} + +#if defined(ARM_MATH_MVEI) +/** + * @brief Vector saturating doubling high multiply returning high half. + * @param[in] m1 Multiplicand + * @param[in] m2 Multiplier + * @return Result of multiplication. + * + */ +__STATIC_FORCEINLINE int32x4_t arm_sat_doubling_high_mult_mve(const int32x4_t m1, const q31_t m2) +{ + return vqrdmulhq_n_s32(m1, m2); +} + +/** + * @brief Vector rounding divide by power of two. + * @param[in] dividend - Dividend vector + * @param[in] exponent - Divisor = power(2, exponent) + * Range: [0, 31] + * @return Rounded result of division. Midpoint is rounded away from zero. + * + */ +__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve(const int32x4_t dividend, const q31_t exponent) +{ + const int32x4_t shift = vdupq_n_s32(-exponent); + const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31); + const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup); + return vrshlq_s32(fixed_up_dividend, shift); +} + +/** + * @brief Requantize a given vector. + * @param[in] val Vector to be requantized + * @param[in] multiplier multiplier + * @param[in] shift shift + * + * @return Returns (val * multiplier)/(2 ^ shift) + * + */ +__STATIC_FORCEINLINE int32x4_t arm_requantize_mve(const int32x4_t val, const q31_t multiplier, const q31_t shift) +{ + return arm_divide_by_power_of_two_mve( + arm_sat_doubling_high_mult_mve(vshlq_s32(val, vdupq_n_s32(LEFT_SHIFT(shift))), multiplier), + RIGHT_SHIFT(shift)); +} + +__STATIC_FORCEINLINE int32x4_t arm_sat_doubling_high_mult_mve_32x4(const int32x4_t m1, const int32x4_t m2) +{ + return vqrdmulhq_s32(m1, m2); +} + +__STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve_32x4(const int32x4_t dividend, const int32x4_t exponent) +{ + const int32x4_t shift = -exponent; + const int32x4_t fixup = vshrq_n_s32(vandq_s32(dividend, shift), 31); + const int32x4_t fixed_up_dividend = vqaddq_s32(dividend, fixup); + return vrshlq_s32(fixed_up_dividend, shift); +} + +__STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val, const int32x4_t multiplier, const int32x4_t shift) +{ + const int32x4_t zz = vdupq_n_s32(0); + const mve_pred16_t p = vcmpgtq_n_s32(shift, 0); + + const int32x4_t left_shift = vpselq_s32(shift, zz, p); + const int32x4_t right_shift = -vpselq_s32(zz, shift, p); + + return arm_divide_by_power_of_two_mve_32x4(arm_sat_doubling_high_mult_mve_32x4(vshlq_s32(val, left_shift), multiplier), right_shift); +} +#endif + +// @note The following functions are used only for softmax layer, scaled bits = 5 assumed + +__STATIC_FORCEINLINE int32_t arm_nn_exp_on_negative_values(int32_t val) +{ + int32_t mask = 0; + int32_t shift = 24; + + const int32_t val_mod_minus_quarter = (val & ((1 << shift) - 1)) - (1 << shift); + const int32_t remainder = val_mod_minus_quarter - val; + const int32_t x = (val_mod_minus_quarter << 5) + (1 << 28); + const int32_t x2 = MUL_SAT(x, x); + + int32_t result = 1895147668 + MUL_SAT(1895147668, x + + DIV_POW2(MUL_SAT(DIV_POW2(MUL_SAT(x2, x2), 2) + MUL_SAT(x2, x), 715827883) + x2, 1)); + +#define SELECT_IF_NON_ZERO(x) \ +{ \ + mask = 
MASK_IF_NON_ZERO(remainder & (1 << shift++)); \ + result = SELECT_USING_MASK(mask, MUL_SAT(result, x), result); \ +} + + SELECT_IF_NON_ZERO(1672461947) + SELECT_IF_NON_ZERO(1302514674) + SELECT_IF_NON_ZERO(790015084) + SELECT_IF_NON_ZERO(290630308) + SELECT_IF_NON_ZERO(39332535) + SELECT_IF_NON_ZERO(720401) + SELECT_IF_NON_ZERO(242) + +#undef SELECT_IF_NON_ZERO + + mask = MASK_IF_ZERO(val); + return SELECT_USING_MASK(mask, Q31_MAX, result); +} + +__STATIC_FORCEINLINE q31_t arm_nn_mult_by_power_of_two(const int32_t val, const int32_t exp) +{ + const int32_t thresh = ((1 << (31 - exp)) - 1); + int32_t result = val << exp; + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val > thresh), Q31_MAX, result); + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val < -thresh), Q31_MIN, result); + return result; +} + +__STATIC_FORCEINLINE int32_t arm_nn_one_over_one_plus_x_for_x_in_0_1(int32_t val) +{ + const int64_t sum = (int64_t)val + (int64_t)Q31_MAX; + const int32_t half_denominator = (int32_t)((sum + (sum >= 0 ? 1 : -1)) / 2L); + int32_t x = 1515870810 + MUL_SAT(half_denominator, -1010580540); + + const int32_t shift = (1 << 29); + x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); + x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); + x += MUL_POW2(MUL_SAT(x, shift - MUL_SAT(half_denominator, x)), 2); + + return MUL_POW2(x, 1); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/adder.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/adder.h new file mode 100755 index 00000000000..00c92796f3d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/adder.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_ADDER_H_ +#define MINDSPORE_LITE_NNACL_ADDER_H_ + +#include "nnacl/op_base.h" + +typedef struct AdderParameter { + OpParameter op_parameter_; +} AdderParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ADDER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arg_min_max_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arg_min_max_parameter.h new file mode 100755 index 00000000000..9569d958ff4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arg_min_max_parameter.h @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_ARG_MIN_MAX_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_ARG_MIN_MAX_PARAMETER_H_ + +#ifdef ENABLE_ARM64 +#include +#endif +#include "nnacl/op_base.h" + +typedef int (*COMPARE_FUNCTION)(const void *a, const void *b); + +typedef struct ArgElement { + uint32_t index_; + union ArgData { + int8_t i8_data_; + int32_t i_data_; + float f_data_; +#ifdef ENABLE_ARM64 + float16_t f16_data_; +#endif + } data_; +} ArgElement; + +typedef struct ArgMinMaxParameter { + OpParameter op_parameter_; + bool out_value_; + bool keep_dims_; + bool get_max_; + int32_t axis_; + int32_t topk_; + int32_t axis_type_; + int32_t dims_size_; + int32_t data_type_; // equals to type_id + int32_t in_strides_[COMM_SHAPE_SIZE]; + int32_t out_strides_[COMM_SHAPE_SIZE]; + ArgElement *arg_elements_; +} ArgMinMaxParameter; + +#endif // MINDSPORE_LITE_NNACL_ARG_MIN_MAX_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic.h new file mode 100755 index 00000000000..198a2b6fcea --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ +#define MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ + +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/nnacl_utils.h" + +typedef struct ArithmeticParameter { + OpParameter op_parameter_; + bool broadcasting_; + size_t ndim_; + int activation_type_; + int in_shape0_[10]; + int in_elements_num0_; + int in_shape1_[10]; + int in_elements_num1_; + + int out_shape_[10]; + int out_elements_num_; + + int in_strides0_[10]; + int in_strides1_[10]; + int out_strides_[10]; + + int multiples0_[10]; + int multiples1_[10]; + int eltwise_mode_; // eltwise need +} ArithmeticParameter; + +#endif // MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic_self_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic_self_parameter.h new file mode 100755 index 00000000000..d98eb72613c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/arithmetic_self_parameter.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/quantize.h" + +// For Abs, Cos, Exp, Log, Square, Sqrt, Rsqrt ops. +typedef struct ArithmeticSelfParameter { + OpParameter op_parameter_; + ArithSelfQuantArg quant_arg_; +} ArithmeticSelfParameter; + +#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/assembly_global.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/assembly_global.h new file mode 100755 index 00000000000..e7cf56c4a1d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/assembly_global.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_ASSEMBLY_GLOBAL_H +#define MINDSPORE_LITE_NNACL_ASSEMBLY_GLOBAL_H + +.macro asm_function fname +#ifdef __APPLE__ + .globl _\fname; +_\fname : +#else + .global \fname; +#ifdef __ELE__ +.hidden \fname; +.type \fname, % function; +#endif +\fname : +#endif + .endm + +#endif // MINDSPORE_LITE_NNACL_ASSEMBLY_GLOBAL_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/arithmetic_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/arithmetic_base.h new file mode 100755 index 00000000000..3e77c944c60 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/arithmetic_base.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_BASE_ARITHMETIC_BASE_H_ +#define MINDSPORE_LITE_NNACL_BASE_ARITHMETIC_BASE_H_ + +#include "nnacl/arithmetic.h" +#include "nnacl/nnacl_utils.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void CalcMultiplesAndStrides(ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_BASE_ARITHMETIC_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/batch_to_space_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/batch_to_space_base.h new file mode 100755 index 00000000000..a4fd9548a70 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/batch_to_space_base.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_BASE_H_ +#define MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_BASE_H_ + +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void BatchToSpaceNoCropForNHWC(const void *input, void *output, const int *in_shape, int out_n, const int *block, + int data_size); +void BatchToSpaceForNHWC(const void *input, void *output, const int *in_shape, int out_n, const int *block, + const int *crops, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/cast_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/cast_base.h new file mode 100755 index 00000000000..757007b0b34 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/cast_base.h @@ -0,0 +1,104 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CAST_BASE_H_ +#define MINDSPORE_LITE_NNACL_CAST_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +inline void BoolToFloat32(const bool *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +inline void Uint8ToFloat32(const uint8_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +inline void Int32ToFloat32(const int32_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +inline void Int64ToFloat32(const int64_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +#ifdef ENABLE_FP16 +inline void Int64ToFp16(const int64_t *input, float16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float16_t)input[i]; + } +} +#endif + +inline void Fp16ToFloat32(const uint16_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = ShortToFloat32(input[i]); + } +} + +inline void Float32ToFp16(const float *input, uint16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = Float32ToShort(input[i]); + } +} + +inline void Float32ToInt32(const float *input, int32_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int32_t)input[i]; + } +} + +inline void Float32ToInt64(const float *input, int64_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int64_t)input[i]; + } +} + +inline void Int32ToInt64(const int32_t *input, int64_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int64_t)input[i]; + } +} + +inline void Float32ToInt16(const float *input, int16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int16_t)input[i]; + } +} + 
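As a quick illustration of how these inline cast helpers are called, here is a minimal sketch (the buffer names, values and sizes are assumptions for the example, not part of the header):

#include <stdint.h>
#include "nnacl/base/cast_base.h"

/* Widen four int32 values to float32 with the helper defined above. */
static void cast_example(void)
{
    int32_t raw[4] = {1, 2, 3, 4};
    float as_f32[4];
    Int32ToFloat32(raw, as_f32, 4);  /* as_f32[i] == (float)raw[i] */
}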
+inline void BoolToInt32(const bool *input, int32_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int32_t)input[i]; + } +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_CAST_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/concat_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/concat_base.h new file mode 100755 index 00000000000..ae6bc5da02e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/concat_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_CONCAT_BASE_H_ +#define MINDSPORE_LITE_NNACL_FP32_CONCAT_BASE_H_ + +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Concat(void **input, int input_num, int axis, int **inputs_output_shape, size_t shape_size, void *output, + int task_id, int thread_num, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_CONCAT_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/conv1x1_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/conv1x1_base.h new file mode 100755 index 00000000000..fc2b63d7b0f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/conv1x1_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_BASE_CONV1X1_BASE_H_ +#define MINDSPORE_LITE_NNACL_BASE_CONV1X1_BASE_H_ + +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Conv1x1InputPack(const void *src_ptr, void *dst_ptr, ConvParameter *conv_param, int data_size); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_BASE_CONV1X1_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/depth_to_space_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/depth_to_space_base.h new file mode 100755 index 00000000000..23474a4f44a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/depth_to_space_base.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_H_ +#define MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_H_ + +#include +#include "nnacl/depth_to_space_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DepthToSpaceForNHWC(const void *input, void *output, const int *in_shape, const DepthToSpaceParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/fill_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/fill_base.h new file mode 100755 index 00000000000..79a100d83e5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/fill_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_FILL_BASE_H_ +#define MINDSPORE_LITE_NNACL_FILL_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/fill_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int FillFp32(float *output, int size, float data); +int FillInt32(int *output, int size, int data); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FILL_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/gather_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/gather_base.h new file mode 100755 index 00000000000..1c3eb0c9b1b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/gather_base.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_GATHER_BASE_H_ +#define MINDSPORE_LITE_NNACL_GATHER_BASE_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int Gather(const void *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, + void *output, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_GATHER_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/minimal_filtering_generator.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/minimal_filtering_generator.h new file mode 100755 index 00000000000..46f4357b112 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/minimal_filtering_generator.h @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ +#define MINDSPORE_LITE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ + +#ifdef ENABLE_ARM +#include +#endif +#include +#include "nnacl/pack.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Polynomial(const float *interval, float *m, int degree); + +void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree); + +void ResidueMatrix(const float *interval, float *b, int row, int col); + +int LT(const float *poly_array, float *matrix_lt, int n); + +void T(const float *poly_array, float *matrix_t, int n); + +int B(const float *poly_array, float *matrix_b, int in_unit); + +void GenerateIntervalArray(float *array, float interval, int degree); + +void MatrixTranspose(const float *matrix, float *trans_matrix, int row, int col); + +void MatrixMultiply(const float *matrix_a, const float *matrix_b, float *matrix_c, int m, int k, int n); + +int CookToomFilter(float *matrix_a, float *matrix_at, float *matrix_b, float *matrix_bt, float *matrix_g, + float *matrix_gt, float coefficient, int out_unit, int filter_size); +void MatrixMultiplyWinograd(const float *matix_a, const float *matrix_b, float *matrix_c, int m, int k, int n, + int in_channel, int c4_channel); + +int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, const float *matrix_gt, + int oc_block, int input_unit_, int kernel_unit_, int channel, int batch, bool pack); + +#if defined(ENABLE_ARM) || defined(ENABLE_SSE) +void MatrixMultiplyVec(const MS_FLOAT32X4 *matrix_a, const MS_FLOAT32X4 *matrix_b, MS_FLOAT32X4 *matrix_c, + const float *bias, int m, int k, int n); +#endif +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/slice_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/slice_base.h new file mode 100755 index 00000000000..35ad748ecac --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/slice_base.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_BASE_SLICE_BASE_H_ +#define MINDSPORE_LITE_NNACL_BASE_SLICE_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PadSliceParameterTo4D(SliceParameter *param); + +void DoSlice(const void *input, void *output, SliceParameter *param, int thread_id, int data_size); +void DoSliceNoParallel(const void *input, void *output, SliceParameter *param, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_BASE_SLICE_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/space_to_depth_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/space_to_depth_base.h new file mode 100755 index 00000000000..c2380202140 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/space_to_depth_base.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_BASE_SPACE_TO_DEPTH_BASE_H_ +#define MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_BASE_SPACE_TO_DEPTH_BASE_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SpaceToDepthForNHWC(const void *input, void *output, const int *in_shape, const int *out_shape, int shape_size, + int block_size, int h_start, int h_end, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_BASE_SPACE_TO_DEPTH_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/split_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/split_base.h new file mode 100755 index 00000000000..3114e83f6ca --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/split_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_NNACL_SPLIT_BASE_H_ +#define MINDSPORE_LITE_NNACL_NNACL_SPLIT_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/split_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoSplit(void *in_data, void **out_data, const int *input_shape, int offset, int num_unit, + SplitParameter *split_param, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_NNACL_SPLIT_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/stack_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/stack_base.h new file mode 100755 index 00000000000..0ccfa7a5e3d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/stack_base.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_STACK_H_ +#define MINDSPORE_LITE_NNACL_STACK_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/stack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Stack(char **inputs, char *output, size_t input_num, size_t copy_size, size_t outter_size); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_STACK_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/tile_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/tile_base.h new file mode 100755 index 00000000000..7a499a3aa7b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/tile_base.h @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_BASE_TILE_H_ +#define MINDSPORE_LITE_NNACL_BASE_TILE_H_ + +#include "nnacl/op_base.h" + +typedef struct TileParameter { + // primitive parameter + OpParameter op_parameter_; + int multiples_[5]; + int dims_[5]; + size_t dims_size_; + size_t multiples_size_; + + // shape correlative + int in_shape_[5]; + int out_shape_[5]; + int in_strides_[5]; + int out_strides_[5]; + + // other parameter + int in_dim_; + size_t data_size_; + size_t fast_outer_size_; + size_t fast_stride_; + size_t fast_multiple_; +} TileParameter; + +#ifdef __cplusplus +extern "C" { +#endif +void Tile(void *input_data, void *output_data, TileParameter *parameter); +void TileSimple(void *input_data, void *output_data, size_t begin, size_t end, TileParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_BASE_TILE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/unstack_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/unstack_base.h new file mode 100755 index 00000000000..ffc4637a994 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/unstack_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_UNSTACK_H_ +#define MINDSPORE_LITE_NNACL_UNSTACK_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/unstack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Unstack(const void *input, void **output, UnstackParameter *para, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_UNSTACK_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/zeroslike_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/zeroslike_base.h new file mode 100755 index 00000000000..a778b9ae95e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/base/zeroslike_base.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MINDSPORE_LITE_NNACL_ZEROSLIKE_BASE_H_
+#define MINDSPORE_LITE_NNACL_ZEROSLIKE_BASE_H_
+
+#include "nnacl/op_base.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void ApproximateZerosLike(void *output, int number, int data_size) {
+  memset(output, 0, number * data_size);
+  return;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_ZEROSLIKE_BASE_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batch_to_space.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batch_to_space.h
new file mode 100755
index 00000000000..7a8a76003b1
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batch_to_space.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_H_
+#define MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_H_
+
+#include <string.h>
+#include "nnacl/op_base.h"
+
+#define BATCH_TO_SPACE_BLOCK_SHAPE_SIZE 2
+
+typedef struct BatchToSpaceParameter {
+  OpParameter op_parameter_;
+  int32_t block_shape_[BATCH_TO_SPACE_BLOCK_SHAPE_SIZE];
+  int32_t crops_[COMM_SHAPE_SIZE];
+  bool no_crop_;
+} BatchToSpaceParameter;
+
+#endif  // MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batchnorm_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batchnorm_parameter.h
new file mode 100755
index 00000000000..8e460cf4381
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/batchnorm_parameter.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_NNACL_BATCHNORM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_BATCHNORM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct BatchNormParameter { + OpParameter op_parameter_; + float epsilon_; + float momentum_; + int unit_; + int units_; + int channel_; + bool fused_; +} BatchNormParameter; + +#endif // MINDSPORE_LITE_NNACL_BATCHNORM_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/broadcast_to_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/broadcast_to_parameter.h new file mode 100755 index 00000000000..dd9823729b6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/broadcast_to_parameter.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct BroadcastToParameter { + OpParameter op_parameter_; + int shape_[COMM_SHAPE_SIZE]; + size_t shape_size_; +} BroadcastToParameter; + +typedef struct BroadcastShapeInfo { + int input_shape_[COMM_SHAPE_SIZE]; + int input_shape_size_; + int output_shape_[COMM_SHAPE_SIZE]; + int output_shape_size_; +} BroadcastShapeInfo; + +#endif // MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/cast_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/cast_parameter.h new file mode 100755 index 00000000000..4a56bc3a987 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/cast_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_CAST_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_CAST_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct CastParameter { + OpParameter op_parameter_; + int dst_type_; + int src_type_; +} CastParameter; + +#endif // MINDSPORE_LITE_NNACL_CAST_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/common_func.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/common_func.h new file mode 100755 index 00000000000..0ab52e62b77 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/common_func.h @@ -0,0 +1,77 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_COMMON_FUNC_H_ +#define MINDSPORE_LITE_NNACL_COMMON_FUNC_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int8_t MinInt8(int8_t a, int8_t b); +int8_t MaxInt8(int8_t a, int8_t b); +void ReluFp32(float *data, float *dst, int ele_num); +void Relu6Fp32(float *data, float *dst, int ele_num); +#ifdef ENABLE_AVX +#ifdef WIN32 +void ReluFp32C8(float *data, float *dst, int ele_num); +void Relu6Fp32C8(float *data, float *dst, int ele_num); +#endif +#endif +int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3); +int offsetComm(const int *shape, const int dim0, const int dim1, const int dim2); +int offset4d(const int *shape, const int *dims); + +static inline bool isAddOverflow(int32_t x, int32_t y) { + int32_t sum = x + y; + return (x > 0 && y > 0 && sum < 0) || (x < 0 && y < 0 && sum > 0); +} + +static inline bool isMulOverflow(int32_t x, int32_t y) { + int32_t p = x * y; + return (x != 0) && (p / x != y); +} + +static inline int GetStride(int *strides, const int *shape, int length) { + if (length <= 0) { + return 1; + } + int stride = 1; + for (int i = length - 1; i >= 0; --i) { + strides[i] = stride; + stride *= shape[i]; + } + return stride; +} + +#ifdef ENABLE_ARM64 +void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu(const float *bias, float *data, size_t oc4, size_t plan_size); +void Relu6(float *data, size_t element4); +void Relu(float *data, size_t element4); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* MINDSPORE_LITE_NNACL_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/concat_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/concat_parameter.h new file mode 100755 index 00000000000..35386464e5f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/concat_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct ConcatParameter { + OpParameter op_parameter_; + ConcatQuantArg quant_arg_; + int axis_; + int thread_count_; + int input_num_; + int **input_shapes_; + int *output_shapes_; + int64_t after_axis_size; + int64_t count_unit_; +} ConcatParameter; + +#endif // MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/constant_of_shape_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/constant_of_shape_parameter.h new file mode 100755 index 00000000000..813fdf488da --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/constant_of_shape_parameter.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct ConstantOfShapeParameter { + OpParameter op_parameter_; + union value_ { + float f32_value_; + int32_t int32_value_; + } value_; + int data_type_; + int element_size_; +} ConstantOfShapeParameter; + +#endif // MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/conv_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/conv_parameter.h new file mode 100755 index 00000000000..5fa4e215d54 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/conv_parameter.h @@ -0,0 +1,131 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_CONV_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_CONV_PARAMETER_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct ConvParameter { + OpParameter op_parameter_; + ConvQuantArg conv_quant_arg_; + int kernel_h_; + int kernel_w_; + int stride_h_; + int stride_w_; + int dilation_h_; + int dilation_w_; + int pad_u_; + int pad_d_; + int pad_l_; + int pad_r_; + int group_; + int tile_num_; + int input_batch_; + int input_h_; + int input_w_; + int input_channel_; + int output_batch_; + int output_h_; + int output_w_; + int output_channel_; + int thread_num_; + int input_unit_; + int output_unit_; + PadMode pad_mode_; + ActType act_type_; + int channel_multiplie_; + int output_padding_w_; + int output_padding_h_; +} ConvParameter; + +typedef struct SlidingWindowParam { + int left_; + int right_; + int top_; + int bottom_; + int c_block_; + int block_channel_; + int ic4_channel_; + int out_step_; + int out_h_step_; + int in_step_; + int in_h_step_; + int in_sh_step_; // stride H + int in_sw_step_; // stride W + int in_kh_step_; // kernel H + int in_kw_step_; // kernel W + int kernel_step_; +} SlidingWindowParam; + +#define OUPUT_UNIT 2 +#define DECONV_WINOGRAD_DEFAULT_UNIT 3 +#define DECONV_WINOGRAD_DEFAULT_TILE 8 +#define DECONV_WINOGRAD_BUFFER_COUNT 8 +typedef struct DeConvWg { + void *b_buffer_; + void *AT_; + void *BT_; + + int kh_; + int kw_; + + int k_; + int i_; + int o_; +} DeConvWg; + +typedef struct DeConvWgABuffer { + bool buf_init_; + void *middle_buffer_; + void *dest_buffer_; +} DeConvWgABuffer; + +typedef struct DeConvComputeUnit { + void *weight_; + void *tmp_buffer_; + int w_start_; + int h_start_; + int w_size_; + int h_size_; + bool use_winograd_; + DeConvWg winograd_; +} DeConvComputeUnit; + +typedef struct DeConvParam { + DeConvComputeUnit *compute_units_; + int compute_size_; + DeConvWgABuffer a_buffer_[DECONV_WINOGRAD_BUFFER_COUNT]; + int input_plane_; + int output_plane_; + int kernel_plane_; + int ic_div4_; + int oc_div4_; + int ic_up4_; + int oc_up4_; + int thread_num_; + int in_tile_count_; + int in_tile_h_count_; + int in_tile_w_count_; + int out_tile_h_; + int out_tile_w_; +} DeConvParam; + +#endif // MINDSPORE_LITE_NNACL_CONV_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/crop_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/crop_parameter.h new file mode 100755 index 00000000000..6730a0be6e8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/crop_parameter.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct CropParameter { + OpParameter op_parameter_; + CropQuantArg quant_arg; + int thread_count_; + int offset_size_; + int64_t offset_[COMM_SHAPE_SIZE]; + int64_t in_offset_[COMM_SHAPE_SIZE]; + int64_t axis_; + int *in_shape_; + int *out_shape_; + int input_dim_; +} CropParameter; + +#endif // MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/depth_to_space_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/depth_to_space_parameter.h new file mode 100755 index 00000000000..0c9dfe9ba45 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/depth_to_space_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct DepthToSpaceParameter { + OpParameter op_parameter_; + // primitive parameter + int32_t block_size_; + // shape correlative + int32_t in_stride_dim0_; + int32_t in_stride_dim1_; + int32_t in_stride_dim2_; + int32_t out_stride_dim0_; + int32_t out_stride_dim1_; + int32_t out_stride_dim2_; + // other parameter + uint8_t data_type_size_; +} DepthToSpaceParameter; + +#endif // MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/detection_post_process_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/detection_post_process_parameter.h new file mode 100755 index 00000000000..f7bc270a6ca --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/detection_post_process_parameter.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct DetectionPostProcessParameter { + OpParameter op_parameter_; + float h_scale_; + float w_scale_; + float x_scale_; + float y_scale_; + float nms_iou_threshold_; + float nms_score_threshold_; + int64_t max_detections_; + int64_t detections_per_class_; + int64_t max_classes_per_detection_; + int64_t num_classes_; + bool use_regular_nms_; + bool out_quantized_; + + float *anchors_; + + void *decoded_boxes_; + void *nms_candidate_; + void *indexes_; + void *scores_; + void *all_class_indexes_; + void *all_class_scores_; + void *single_class_indexes_; + void *selected_; +} DetectionPostProcessParameter; + +#endif // MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/errorcode.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/errorcode.h new file mode 100755 index 00000000000..18a50290ccc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/errorcode.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_ERRORCODE_H_ +#define MINDSPORE_LITE_NNACL_ERRORCODE_H_ + +typedef enum ErrorCodeCommonEnum { + NNACL_OK = 0, + NNACL_ERR = 1, + NNACL_NULL_PTR, + NNACL_PARAM_INVALID, + NNACL_INFER_INVALID, + NNACL_INPUT_TENSOR_ERROR, + NNACL_COMMON_END = 9999 +} ErrorCodeCommonEnum; + +typedef enum ErrorCodeFp32OpEnum { + NNACL_ERRCODE_OP_FP32_START = 10000, + NNACL_ERRCODE_STRASSEN_RECURSION_MALLOC, + NNACL_ERRCODE_REVERSE_MALLOC, + NNACL_ERRCODE_SQRT_NEGATIVE, + NNACL_ERRCODE_RSQRT_NEGATIVE, + NNACL_ERRCODE_RSQRT_NEGATIVE_OR_ZERO, + NNACL_ERRCODE_LOG_NEGATIVE_OR_ZERO, + NNACL_ERRCODE_DIVISOR_ZERO, + NNACL_ERRCODE_INDEX_OUT_OF_RANGE, + NNACL_ERRCODE_WINOGRAD_GENERATOR_ERROR, + NNACL_ERRCODE_OP_FP32_END = 19999 +} ErrorCodeFp32OpEnum; + +typedef enum ErrorCodeFp16OpEnum { + NNACL_ERRCODE_OP_FP16_START = 20000, + NNACL_ERRCODE_OP_FP16_WINOGRAD_GENERATOR, + NNACL_ERRCODE_OP_FP16_END = 29999 +} ErrorCodeFp16OpEnum; + +typedef enum ErrorCodeUint8OpEnum { + NNACL_ERRCODE_OP_UINT8_START = 30000, + NNACL_ERRCODE_OP_UINT8_END = 39999 +} ErrorCodeUint8OpEnum; + +typedef enum ErrorCodeInt8OpEnum { + NNACL_ERRCODE_OP_INT8_START = 40000, + NNACL_ERRCODE_ADD_OVERFLOW, + NNACL_ERRCODE_MUL_OVERFLOW, + NNACL_ERRCODE_OP_INT8_END = 49999 +} ErrorCodeInt8OpEnums; + +#endif // MINDSPORE_LITE_NNACL_ERRORCODE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fill_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fill_parameter.h new file mode 100755 index 00000000000..dbde073817a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fill_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FILL_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_FILL_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct FillParameter { + // Primitive parameter + OpParameter op_parameter_; + int dims_[COMM_SHAPE_SIZE]; + int num_dims_; +} FillParameter; + +#endif // MINDSPORE_LITE_NNACL_FILL_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/activation_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/activation_fp32.h new file mode 100755 index 00000000000..1f741c50b5b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/activation_fp32.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_ACTIVATION_H_ +#define MINDSPORE_LITE_NNACL_FP32_ACTIVATION_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/int8/fixed_point.h" + +typedef struct ActivationParameter { + OpParameter op_parameter_; + int type_; + float alpha_; + float min_val_; + float max_val_; +} ActivationParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Fp32Relu(const float *src, int length, float *dst); +int Fp32Relu6(const float *src, int length, float *dst); +int LRelu(const float *src, int length, float *dst, float alpha); +int Sigmoid(const float *src, int length, float *dst); +int Tanh(const float *src, int length, float *dst); +int HSigmoid(const float *src, int length, float *dst); +int Swish(const float *src, int length, float *dst); +int HSwish(const float *src, int length, float *dst); +int HardTanh(const float *src, int length, float *dst, float min_val, float max_val); +int Gelu(const float *src, int length, float *dst, bool approximate); + +float TanhOpt(float src); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FP32_ACTIVATION_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/add_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/add_fp32.h new file mode 100755 index 00000000000..4344f33175b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/add_fp32.h @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MINDSPORE_LITE_NNACL_FP32_ADD_H_
+#define MINDSPORE_LITE_NNACL_FP32_ADD_H_
+
+#ifdef ENABLE_NEON
+#include <arm_neon.h>
+#endif
+#include "nnacl/op_base.h"
+#include "nnacl/base/arithmetic_base.h"
+#include "nnacl/errorcode.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ElementAdd(const float *in0, const float *in1, float *out, int size);
+int ElementAddRelu(const float *in0, const float *in1, float *out, int size);
+int ElementAddRelu6(const float *in0, const float *in1, float *out, int size);
+int ElementAddInt(const int *in0, const int *in1, int *out, int size);
+int ElementOptAdd(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param);
+int ElementOptAddInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param);
+int ElementOptAddRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param);
+int ElementOptAddRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param);
+int BroadcastAdd(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size,
+                 ArithmeticParameter *param);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_FP32_ADD_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/adder_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/adder_fp32.h
new file mode 100755
index 00000000000..fd3956ac085
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/adder_fp32.h
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_ADDER_H_ +#define MINDSPORE_LITE_NNACL_FP32_ADDER_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef ENABLE_ARM64 +void AdderFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, size_t stride); +#endif + +void AdderOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, int col, + size_t stride); + +void AdderFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, + float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_ADDER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arg_min_max_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arg_min_max_fp32.h new file mode 100755 index 00000000000..509e85203f3 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arg_min_max_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_ARG_MIN_MAX_H_ +#define MINDSPORE_LITE_NNACL_FP32_ARG_MIN_MAX_H_ + +#include "nnacl/nnacl_common.h" +#include "nnacl/arg_min_max_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ArgMinMaxFp32(const float *input, void *output, float *output_value, const int *in_shape, + const ArgMinMaxParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_ARG_MIN_MAX_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_compare_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_compare_fp32.h new file mode 100755 index 00000000000..5f0ed2d58b6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_compare_fp32.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_H_ +#define MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementNotEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementNotEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementLessFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementLessInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementLessEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementLessEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementGreaterFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementGreaterInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementGreaterEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementGreaterEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_fp32.h new file mode 100755 index 00000000000..1580ea3b9f5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_fp32.h @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_H_
+#define MINDSPORE_LITE_NNACL_ARITHMETIC_H_
+
+#ifdef ENABLE_NEON
+#include <arm_neon.h>
+#endif
+#include "nnacl/op_base.h"
+#include "nnacl/base/arithmetic_base.h"
+#include "nnacl/errorcode.h"
+#include "nnacl/fp32/add_fp32.h"
+#include "nnacl/fp32/mul_fp32.h"
+#include "nnacl/fp32/div_fp32.h"
+#include "nnacl/fp32/sub_fp32.h"
+#include "nnacl/fp32/squared_difference.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void TileOneDimensionFp32(const float *inData, float *outData, int dim, size_t ndim, const int *inShape,
+                          const int *inStrides, const int *outStrides, const int *multiple);
+void TileDimensionsFp32(const float *data0, const float *data1, float *tile_data0, float *tile_data1,
+                        ArithmeticParameter *param);
+/* logical and */
+int ElementLogicalAnd(const float *in0, const float *in1, float *out, int size);
+int ElementLogicalAndInt(const int *in0, const int *in1, int *out, int size);
+int ElementLogicalAndBool(const bool *in0, const bool *in1, bool *out, int size);
+
+/* logical or */
+int ElementLogicalOr(const float *in0, const float *in1, float *out, int size);
+
+/* max min */
+int ElementMaximum(const float *in0, const float *in1, float *out, int size);
+int ElementMinimum(const float *in0, const float *in1, float *out, int size);
+int ElementMaximumInt(const int *in0, const int *in1, int *out, int size);
+int ElementMinimumInt(const int *input0, const int *input1, int *output, const int element_size);
+
+/* floor div */
+int ElementFloorDiv(const float *in0, const float *in1, float *out, int size);
+int ElementFloorDivInt(const int *in0, const int *in1, int *out, int size);
+
+/* floor mod */
+int ElementFloorMod(const float *in0, const float *in1, float *out, int size);
+int ElementFloorModInt(const int *in0, const int *in1, int *out, int size);
+
+/* mod */
+int ElementMod(const float *in0, const float *in1, float *out, int size);
+int ElementModInt(const int *in0, const int *in1, int *out, int size);
+int ElementOptMod(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param);
+int ElementOptModInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_ARITHMETIC_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_self_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_self_fp32.h
new file mode 100755
index 00000000000..aefd434d6f9
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/arithmetic_self_fp32.h
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_H_ +#define MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementAbs(const float *input, float *output, const int element_size); + +int ElementCos(const float *input, float *output, const int element_size); + +int ElementLog(const float *input, float *output, const int element_size); + +int ElementSquare(const float *input, float *output, const int element_size); + +int ElementSqrt(const float *input, float *output, const int element_size); + +int ElementRsqrt(const float *input, float *output, const int element_size); + +int ElementSin(const float *input, float *output, const int element_size); + +int ElementLogicalNot(const float *input, float *output, const int element_size); + +int ElementLogicalNotBool(const bool *input, bool *output, const int element_size); + +int ElementRound(const float *input, float *output, const int element_size); + +int ElementFloor(const float *input, float *output, const int element_size); + +int ElementCeil(const float *input, float *output, const int number); + +int ElementNegative(const float *input, float *output, const int element_size); + +int ElementReciprocal(const float *input, float *output, const int element_size); + +int ElementErf(const float *input, float *output, const int element_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/batchnorm_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/batchnorm_fp32.h new file mode 100755 index 00000000000..043c7c1335b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/batchnorm_fp32.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_BATCHNORM_H_ +#define MINDSPORE_LITE_NNACL_FP32_BATCHNORM_H_ + +#include "nnacl/batchnorm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void BatchNormFp32(const void *input, const void *mean, const void *variance, const BatchNormParameter *param, + int task_id, void *output); +void FusedBatchNormFp32(const void *input, const void *scale, const void *offset, const void *mean, + const void *variance, const BatchNormParameter *param, int task_id, void *output); + +void FusedBatchNormFp32MeanVar(const float *input, float *run_mean, float *run_var, const BatchNormParameter *param, + float *save_mean, float *save_var); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/broadcast_to_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/broadcast_to_fp32.h new file mode 100755 index 00000000000..5c354be667d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/broadcast_to_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/broadcast_to_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int BroadcastTo(const float *input, BroadcastShapeInfo *shape_info, float *output); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/common_func_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/common_func_fp32.h new file mode 100755 index 00000000000..fc6f75ebe9a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/common_func_fp32.h @@ -0,0 +1,105 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ +#define MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" + +typedef struct ConvDwFp32BorderParam { + float *dst; + const float *src; + const float *weight; + const float *bias; + size_t height; + size_t width; + size_t in_kh_step; + size_t in_kw_step; + size_t kernel_w; + size_t relu; + size_t relu6; +} ConvDwFp32BorderParam; + +#ifdef __cplusplus +extern "C" { +#endif + +void PostConvFuncFp32C8(const float *c8_out_ptr, float *out_ptr, const float *bias_ptr, size_t output_channel, + size_t plane_size, size_t stride, size_t relu_type); +void PostConvFuncFp32C4(const float *c4_out_ptr, float *out_ptr, const float *bias_ptr, size_t output_channel, + size_t plane_size, size_t plane_stride, size_t relu_type); + +void WinogradTransLeft(const float *S, const float *B, float *M, size_t w, size_t h, size_t k, size_t length); +void WinogradTransRight(const float *S, const float *B, float *M, size_t w, size_t h, size_t k, size_t length); + +#if defined(ENABLE_ARM) || defined(ENABLE_SSE) +void ConvDwFp32Center(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, + size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, size_t relu, size_t relu6); +#ifdef ENABLE_AVX +void ConvDwFp32Border(ConvDwFp32BorderParam *param); +#else +void ConvDwFp32Border(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, + size_t in_kh_step, size_t in_kw_step, size_t kernel_w, size_t relu, size_t relu6); +#endif +void DeconvDwFp32Center(float *dst, const float *src, const float *weight, size_t height, size_t width, size_t kernel_h, + size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, size_t in_sw_step, + size_t in_kh_step, size_t in_kw_step); +void PostFuncBiasReluC8(float *dst, const float *src, const float *bias, size_t oc8div, size_t oc8mod, + size_t plane_size, size_t stride, size_t relu_type); +void ConvDwFp32Row(float *output_ptr, const float *input_ptr, const float *weight_ptr, size_t num_pixels, + size_t output_channel, size_t input_step); +void PostFuncBiasReluC4(float *dst, const float *src, const float *bias, size_t oc4div, size_t oc4mod, + size_t plane_size, size_t plane_stride, size_t relu_type); +#endif + +#ifdef ENABLE_ARM64 +void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu(const float *bias, float *data, size_t oc4, size_t plan_size); +void Relu6(float *data, size_t element4); +void Relu(float *data, size_t element4); + +void DeconvDwFp32Border(float *dst, const float *src, const float *weight, size_t height, size_t width, + size_t in_kh_step, size_t in_kw_step, size_t kernel_w); + +void ConvSwFp32Center(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t ic4, + size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, size_t relu, + size_t relu6); + +void ConvDw3x3Stride1(float *output, const float *buffer, const float *weight, const float *bias, int col_size, + int row_size, int channel, int output_h, int output_w, size_t relu, size_t relu6); + +void ConvDw3x3Stride2(float *output, const float 
*buffer, const float *weight, const float *bias, int col_size, + int row_size, int channel, int output_h, int output_w, size_t relu, size_t relu6); + +void ConvDw3x3Corner(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, + int in_kw_step, int channel, size_t relu, size_t relu6); + +void ConvDw3x3Vertical(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, + int in_kw_step, int channel, size_t relu, size_t relu6); + +void ConvDw3x3Horizontal(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, + int in_kw_step, int channel, size_t relu, size_t relu6); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/constant_of_shape_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/constant_of_shape_fp32.h new file mode 100755 index 00000000000..672d30a5e5b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/constant_of_shape_fp32.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ +#include +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/constant_of_shape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +inline int ConstantOfShapeInt32(int32_t *output, int start, int end, int32_t value) { + for (int i = start; i < end; i++) { + output[i] = value; + } + return NNACL_OK; +} + +inline int ConstantOfShapeFp32(float *output, int start, int end, float value) { + for (int i = start; i < end; i++) { + output[i] = value; + } + return NNACL_OK; +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_common_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_common_fp32.h new file mode 100755 index 00000000000..fe5bb1b9d15 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_common_fp32.h @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_CONV_COMMON_H_ +#define MINDSPORE_LITE_NNACL_FP32_CONV_COMMON_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// fp32 convolution common (im2col+gemm) +void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, + float *col_major_input, float *output_data, int task_id, const ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_CONV_COMMON_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_depthwise_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_depthwise_fp32.h new file mode 100755 index 00000000000..26ba01e895c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_depthwise_fp32.h @@ -0,0 +1,91 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_CONV_DEPTHWISE_H_ +#define MINDSPORE_LITE_NNACL_FP32_CONV_DEPTHWISE_H_ + +#include "nnacl/conv_parameter.h" + +#ifndef ENABLE_ARM64 +void DepthwiseCenter(float *dst, const float *src, const float *weight, const float *bias, int height, int width, + int kernel_h, int kernel_w, int out_h_step, int block_channel, int in_sh_step, int in_sw_step, + int in_kh_step, int in_kw_step, bool is_relu, bool is_relu6); +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void ConvDw(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, + const ConvParameter *conv_param, int task_id); + +void InitSlidingParam(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void InitSlidingParamConv(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void AppendSlidingParamConv(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void InitSlidingParamConvDw(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void AppendSlidingParamConvDw(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void ConvDwSWFp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, + const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id); + +bool CheckConvDwUse3X3(const ConvParameter *conv_param); + +bool CheckConvDwUseIndirectBuffer(const ConvParameter *conv_param); + +void ConvDwInitIndirection(float **indirect_buffer, float *src, float *zero_ptr, const ConvParameter *conv_param, + int step_h, int step_w); + +#ifdef ENABLE_ARM64 +void ConvDwFp32Indirect3x3(float *output, float **input, const float *weights, const float *bias, int channels, + int output_width, size_t input_stride, size_t relu, size_t relu6); + +void ConvDwFp32Indirect5x5(float *output, float **input, const float *weights, const float *bias, int channels, + int output_width, size_t input_stride, size_t relu, size_t relu6); +#endif + +#ifdef ENABLE_AVX +void ConvDwFp32Avx3x3(float *output, float **input, const float *weights, const float *bias, size_t channels, + size_t output_width, size_t input_stride, size_t relu, size_t relu6); + +void ConvDwFp32Avx5x5(float *output, float **input, const float *weights, const float *bias, size_t channels, + size_t output_width, size_t input_stride, size_t relu, size_t relu6); +#endif + +#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX)) +void ConvDw3x3(float *output_data, float *buffer, const float *input_data, const float *weight_data, + const float *bias_data, const ConvParameter *conv_param, int start_oh, int end_oh); + +bool CheckConvDw1DWinograd(const ConvParameter *conv_param, int thread_num); +#endif + +void ConvDwFp32IndirectRow(float *output, float **input, const float *weights, const float *bias, int channels, + int output_width, int input_stride, bool relu, bool relu6, int kernel); + +void ConvDwIndirection(float *output_data, float **indirect_buffer, const float *weight_data, const float *bias_data, + float *zero_ptr, const ConvParameter *conv_param, int task_id); + +void DeconvDwSWFp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, + const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_CONV_DEPTHWISE_H_ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_winograd_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_winograd_fp32.h new file mode 100755 index 00000000000..c4cee273f0d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/conv_winograd_fp32.h @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_CONV_WINOGRAD_H_ +#define MINDSPORE_LITE_NNACL_FP32_CONV_WINOGRAD_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/fp32/winograd_utils.h" +#include "nnacl/fp32/conv_depthwise_fp32.h" + +typedef float *TmpBufferAddress; + +#ifdef __cplusplus +extern "C" { +#endif + +// fp32 convolution winograd +void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data, + TmpBufferAddress *buffer_list, int task_id, const ConvParameter *conv_param, + InputTransFunc in_func, OutputTransFunc out_func); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_CONV_WINOGRAD_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/crop_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/crop_fp32.h new file mode 100755 index 00000000000..793c7ab8d8f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/crop_fp32.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_CROP_H_ +#define MINDSPORE_LITE_NNACL_FP32_CROP_H_ +#include "nnacl/op_base.h" +#include "nnacl/crop_parameter.h" + +#define CROP_OFFSET_MAX_SIZE 4 + +#ifdef __cplusplus +extern "C" { +#endif +void Crop4D(const float *input, float *output, const int *in_shape, const int *out_shape, + const CropParameter *crop_param, int thread_id); +void Crop4DNoParallel(const float *input, float *output, const int *in_shape, const int *out_shape, + const CropParameter *crop_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_CROP_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_fp32.h new file mode 100755 index 00000000000..2cd7ea3b02f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_fp32.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_DECONV_H_ +#define MINDSPORE_LITE_NNACL_FP32_DECONV_H_ + +#include +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp32/common_func_fp32.h" +#include "nnacl/base/minimal_filtering_generator.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PackDeConvWeightFp32(const float *weight, float *dst, int input_channel, int output_channel, int plane); +void DeConvPostFp32C8(const float *src, float *tmp_out, const float *bias, float *dst, int output_channel, + const ConvParameter *conv_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_DECONV_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_winograd_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_winograd_fp32.h new file mode 100755 index 00000000000..a2056dd36da --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/deconv_winograd_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_DECONV_WINOGRAD_H_ +#define MINDSPORE_LITE_NNACL_FP32_DECONV_WINOGRAD_H_ + +#include +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp32/common_func_fp32.h" +#include "nnacl/base/minimal_filtering_generator.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PackDeConvWgDataFp32(const float *nhwc_weight, DeConvComputeUnit *unit, const ConvParameter *conv_param, + const DeConvParam *deconv_param); +void DeconvWg(const float *nhwc_input_, float *tile_in, float *tile_out, int start_index, int calculate_count, + const ConvParameter *conv_param, DeConvParam *deconv_param, int task_id); +void DeconvWgPost(const float *tile_out, float *nc4hw4_output, const ConvParameter *conv_param, + const DeConvParam *deconv_param, int calculate_count, int tile_index); +void TiledC4MatmulFp32(float *dst, const float *src, const float *weight, size_t ic4, size_t cal_num, size_t oc4); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_DECONV_WINOGRAD_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/detection_post_process_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/detection_post_process_fp32.h new file mode 100755 index 00000000000..60b1b0a4697 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/detection_post_process_fp32.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_DETECTION_POST_PROCESS_H_ +#define MINDSPORE_LITE_NNACL_FP32_DETECTION_POST_PROCESS_H_ + +#include "nnacl/op_base.h" +#include "nnacl/detection_post_process_parameter.h" + +typedef struct { + float y; + float x; + float h; + float w; +} BboxCenter; + +typedef struct { + float ymin; + float xmin; + float ymax; + float xmax; +} BboxCorner; + +#ifdef __cplusplus +extern "C" { +#endif +int DecodeBoxes(int num_boxes, const float *input_boxes, const float *anchors, + const DetectionPostProcessParameter *param); + +int NmsMultiClassesFastCore(const int num_boxes, const int num_classes_with_bg, const float *input_scores, + void (*)(const float *, int *, int, int), const DetectionPostProcessParameter *param, + const int task_id, const int thread_num); + +int DetectionPostProcessFast(const int num_boxes, const int num_classes_with_bg, const float *input_scores, + const float *decoded_boxes, float *output_boxes, float *output_classes, + float *output_scores, float *output_num, void (*)(const float *, int *, int, int), + const DetectionPostProcessParameter *param); + +int DetectionPostProcessRegular(const int num_boxes, const int num_classes_with_bg, const float *input_scores, + float *output_boxes, float *output_classes, float *output_scores, float *output_num, + void (*)(const float *, int *, int, int), const DetectionPostProcessParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_DETECTION_POST_PROCESS_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/div_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/div_fp32.h new file mode 100755 index 00000000000..755d678d550 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/div_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_DIV_H_ +#define MINDSPORE_LITE_NNACL_FP32_DIV_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementDiv(const float *in0, const float *in1, float *out, int size); +int ElementDivRelu(const float *in0, const float *in1, float *out, int size); +int ElementDivRelu6(const float *in0, const float *in1, float *out, int size); +int ElementOptDiv(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptDivRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptDivRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptDivInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int BroadcastDiv(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, + ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_DIV_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/elu_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/elu_fp32.h new file mode 100755 index 00000000000..2881aefa0c0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/elu_fp32.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_ELU_H_ +#define MINDSPORE_LITE_NNACL_FP32_ELU_H_ + +#include "nnacl/op_base.h" + +typedef struct EluParameter { + OpParameter op_parameter_; + // primitive parameter + float alpha_; + + // shape correlative + int in_size_; +} EluParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Elu(const float *input_data, float *output_data, const EluParameter *parameter, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_ELU_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/embedding_lookup_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/embedding_lookup_fp32.h new file mode 100755 index 00000000000..a6e655d8411 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/embedding_lookup_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_EMBEDDING_LOOKUP_H_ +#define MINDSPORE_LITE_NNACL_FP32_EMBEDDING_LOOKUP_H_ + +#include "nnacl/op_base.h" + +typedef struct EmbeddingLookupParameter { + OpParameter op_parameter_; + // primitive parameter + float max_norm_; + + // shape correlative + bool *is_regulated_; + int ids_size_; + int layer_size_; + int layer_num_; +} EmbeddingLookupParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int EmbeddingLookup(float *input_data, const int *ids, float *output_data, const EmbeddingLookupParameter *parameter, + int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_EMBEDDING_LOOKUP_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/exp_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/exp_fp32.h new file mode 100755 index 00000000000..4713397c51d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/exp_fp32.h @@ -0,0 +1,100 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_EXP_H_ +#define MINDSPORE_LITE_NNACL_FP32_EXP_H_ + +#include "nnacl/op_base.h" + +typedef struct ExpParameter { + // Primitive parameter + OpParameter op_parameter_; + float base_; + float scale_; + float shift_; + // other parameter + int thread_num_; + float in_scale_; + float out_scale_; + int element_num_; +} ExpParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Exp(const float *input_data, float *output_data, const ExpParameter *parameter, int task_id); +void ExpFp32(const float *src, float *dst, int num); + +#if defined(ENABLE_ARM) || defined(ENABLE_SSE) +static inline void simd_exp(MS_FLOAT32X4 input, float *dst) { + static MS_FLOAT32X4 maxv = {88.0f, 88.0f, 88.0f, 88.0f}; + static MS_FLOAT32X4 minv = {-88.0f, -88.0f, -88.0f, -88.0f}; + static MS_FLOAT32X4 param[] = {{0.693147f, 0.693147f, 0.693147f, 0.693147f}, + {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, + {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, + {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, + {0.5f, 0.5f, 0.5f, 0.5f}, + {1.0f, 1.0f, 1.0f, 1.0f}}; + + input = MS_MAXQ_F32(minv, MS_MINQ_F32(input, maxv)); + MS_INT32X4 integer = MS_CVTQPS_EPI32(input / param[0]); + MS_FLOAT32X4 decimal = input - MS_CVTQEPI32_PS(integer) * param[0]; + MS_INT32X4 int_exp = MS_SLLIQ_EPI32(MS_ADDQ_EPI32(integer, MS_MOVQ_EPI32(127)), 23); + MS_FLOAT32X4 decimal_exp = + param[5] + + decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + MS_STQ_F32(dst, decimal_exp * MS_LDQ_F32((float *)(&int_exp))); +} +#endif + +#if defined(ENABLE_AVX) +static inline void simd_exp_avx(MS_FLOAT32X8 input, float *dst) { + static MS_FLOAT32X8 maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f}; + static MS_FLOAT32X8 minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f}; + static MS_FLOAT32X8 param[] = { + {0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f}, + {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, + {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, + {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, + {0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f}, + {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}; + input = MS_MAX256_F32(minv, MS_MIN256_F32(input, maxv)); + MS_INT32X8 integer = MS_CVT256PS_EPI32(input / param[0]); + MS_FLOAT32X8 decimal = input - MS_CVT256EPI32_PS(integer) * param[0]; + MS_INT32X8 int_exp = MS_SLLI256_EPI32(MS_ADD256_EPI32(integer, MS_MOV256_EPI32(127)), 23); + MS_FLOAT32X8 decimal_exp = + param[5] + + decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + MS_ST256_F32(dst, decimal_exp * MS_LD256_F32((float *)(&int_exp))); +} +#endif + +static inline void single_exp(float src, float *dst) { + static float param[] = {0.693147f, 1.0f / 120, 1.0f / 24, 1.0f / 6, 1.0f / 2, 1.0f}; // log(2.0f) + src = MSMAX(-88.0f, MSMIN(88.0f, src)); + int integer = src / param[0]; + float decimal = src - integer * param[0]; + int int_exp = (integer + 127) << 23; + float decimal_exp = + 1.0f + decimal * (1.0f + decimal * (0.5f + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + float *ptr = (float *)&int_exp; + *dst = *ptr * decimal_exp; +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_EXP_H_ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gatherNd_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gatherNd_fp32.h new file mode 100755 index 00000000000..ec4f4c4da3f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gatherNd_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_GATHERND_H_ +#define MINDSPORE_LITE_NNACL_GATHERND_H_ + +#include "nnacl/op_base.h" + +typedef struct GatherNdParameter { + // Primitive parameter + OpParameter op_parameter_; +} GatherNdParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int GatherNd(const float *input, float *output, const int *in_offset, int area, int count); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_GATHERND_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gru_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gru_fp32.h new file mode 100755 index 00000000000..69ddd23bf07 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/gru_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_GRU_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_GRU_FP32_H_ +#include "nnacl/gru_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Gru(float *output, const float *input, const float *weight_g, const float *weight_r, const float *bias, + float *hidden_state, float *gate_buffer, float *matmul_buffer[2], int check_seq_len, + const GruParameter *gru_parm); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_GRU_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/instance_norm_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/instance_norm_fp32.h new file mode 100755 index 00000000000..b00b7491f90 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/instance_norm_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_INSTANCE_NORM_H_ +#define MINDSPORE_LITE_NNACL_FP32_INSTANCE_NORM_H_ + +#include "nnacl/op_base.h" +#include "nnacl/instance_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int InstanceNorm(const float *src_data, float *dst_data, const float *gamma_data, const float *beta_data, + const InstanceNormParameter *param, size_t task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_INSTANCE_NORM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/invert_permutation_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/invert_permutation_fp32.h new file mode 100755 index 00000000000..7151cdfddce --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/invert_permutation_fp32.h @@ -0,0 +1,27 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_FP32_H_ +#define MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_FP32_H_ + +#ifdef __cplusplus +extern "C" { +#endif +void InvertPermutation(const int *input, int *output, int num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/l2_norm_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/l2_norm_fp32.h new file mode 100755 index 00000000000..4771d382f11 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/l2_norm_fp32.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_L2NORM_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_L2NORM_FP32_H_ + +#include "nnacl/l2_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int CalcThreadSquareSum(const float *input_ptr, float *sum, int begin, int end); +int ThreadDivSqrtSum(const float *input_ptr, float *output_ptr, const L2NormParameter *param, const float sqrt_sum, + const int begin, const int end); +int ThreadTrailingAxis(const float *input_ptr, float *output_ptr, const L2NormParameter *param, const int begin, + const int end); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_L2NORM_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/layer_norm_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/layer_norm_fp32.h new file mode 100755 index 00000000000..3849fa125b4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/layer_norm_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_LAYER_NORM_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_LAYER_NORM_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/layer_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNorm(const float *src_data, const float *gamma_data, const float *beta_data, float *dst_data, float *out_mean, + float *out_deno, LayerNormParameter *param, size_t task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_LAYER_NORM_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/local_response_norm_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/local_response_norm_fp32.h new file mode 100755 index 00000000000..57046bb32db --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/local_response_norm_fp32.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_LOCAL_RESPONSE_NORM_H_ +#define MINDSPORE_LITE_NNACL_LOCAL_RESPONSE_NORM_H_ + +#include "nnacl/op_base.h" + +typedef struct LocalResponseNormParameter { + // Primitive parameter + OpParameter op_parameter_; + int depth_radius_; + float bias_; + float alpha_; + float beta_; +} LocalResponseNormParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int LocalResponseNorm(const float *input_ptr, int out_size, int channel, float *output_ptr, + const LocalResponseNormParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_LOCAL_RESPONSE_NORM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/lstm_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/lstm_fp32.h new file mode 100755 index 00000000000..3a7142e6c6c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/lstm_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_LSTM_H_ +#define MINDSPORE_LITE_NNACL_FP32_LSTM_H_ + +#include "nnacl/lstm_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif +void PackLstmWeight(float *dst, const float *src, int batch, int deep, int col, int col_align); + +void PackLstmBias(float *dst, const float *src, int batch, int col, int col_align, bool is_bidirectional); + +void PackLstmInput(const float *src, float *dst, int row, int deep); + +void LstmMatMul(float *c, const float *a, const float *b, const float *bias, int row, int deep, int col, bool is_vec); + +void ElementMulAcc(const float *input0, const float *input1, float *output, int element_size); + +int ElementOptMulAcc(const float *input0, const float input1, float *output, const int element_size); + +void Lstm(float *output, const float *input, const float *weight_i, const float *weight_h, const float *input_bias, + const float *state_bias, float *hidden_state, float *cell_state, float *state_buffer[2], float *buffer[4], + const LstmParameter *lstm_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_LSTM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/matmul_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/matmul_fp32.h new file mode 100755 index 00000000000..4a629eeadd3 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/matmul_fp32.h @@ -0,0 +1,83 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_MATMUL_H_ +#define MINDSPORE_LITE_NNACL_FP32_MATMUL_H_ + +#include +#include +#include "nnacl/errorcode.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/op_base.h" + +#define ADD_BIAS(value, bias, c) \ + if (bias != NULL) value = value + bias[c]; + +#define DO_RELU(value, act_type) \ + if (act_type == ActType_Relu) value = MSMAX(0.0f, value); + +#define DO_RELU6(value, act_type) \ + if (act_type == ActType_Relu6) value = MSMIN(6.0f, value); \ + if (act_type == ActType_Relu6) value = MSMAX(0.0f, value); + +#ifdef __cplusplus +extern "C" { +#endif +void MatMulOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, + int col, size_t stride, int out_type); +void MatVecMulFp32(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int col); + +void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row6Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row16Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col6Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col16Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); + +#ifdef ENABLE_ARM64 +void MatmulFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, size_t stride, size_t writeNhwc, size_t WriteWino); +void MatmulFloatNeon64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, size_t stride, size_t write_mode); +#elif ENABLE_ARM32 +void MatmulFloatNeon32(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, size_t writeNhwc, size_t WriteWino); +void MatmulFloatNeon32Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, int write_mode); +void MatmulFloatNeon32Opt12x4(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, + int row, int col, int stride, int write_mode); +#elif ENABLE_SSE +#include +void MatmulFloatSse64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, size_t writeNhwc, size_t WriteWino); +void MatmulFloatSse64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, int write_mode); +#ifdef ENABLE_AVX +void MatmulFloatAvxOpt(const float *a, const float *b, float *c, const float *bias, size_t act_type, size_t depth, + size_t row, size_t col, size_t stride, size_t write_mode); +#endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_MATMUL_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/mul_fp32.h 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/mul_fp32.h new file mode 100755 index 00000000000..0ff54a43c82 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/mul_fp32.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_MUL_H_ +#define MINDSPORE_LITE_NNACL_FP32_MUL_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ElementMul(const float *in0, const float *in1, float *out, int size); +int ElementMulRelu(const float *in0, const float *in1, float *out, int size); +int ElementMulRelu6(const float *in0, const float *in1, float *out, int size); +int ElementMulInt(const int *in0, const int *in1, int *out, int size); +int ElementMulReluInt(const int *in0, const int *in1, int *out, int size); +int ElementMulRelu6Int(const int *in0, const int *in1, int *out, int size); +int ElementOptMul(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptMulRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptMulRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptMulInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int ElementOptMulReluInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int ElementOptMulRelu6Int(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int BroadcastMul(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, + ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_MUL_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/one_hot_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/one_hot_fp32.h new file mode 100755 index 00000000000..7b2039bb089 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/one_hot_fp32.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_ONE_HOT_H_ +#define MINDSPORE_LITE_NNACL_FP32_ONE_HOT_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" + +typedef struct OneHotParameter { + // Primitive parameter + OpParameter op_parameter_; + int axis_; + // other parameter + int depth_; + float on_value_; + float off_value_; + int outer_size_; + int inner_size_; + bool support_neg_index_; // if true, support neg index in indices tensor; if false, set off_value on neg index. +} OneHotParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int OneHot(const int *indices, float *output, const OneHotParameter *one_hot_param, const int tid, + const int thread_num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_ONE_HOT_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pack_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pack_fp32.h new file mode 100755 index 00000000000..230f4eb9576 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pack_fp32.h @@ -0,0 +1,70 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
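A hedged usage sketch for OneHot as declared above: encode four class indices into a 4 x 10 matrix. Setting outer_size_ to the number of indices and inner_size_ to 1 assumes the usual split of the indices shape around the insertion axis; in generated code these fields are filled by the kernel wrapper, so treat the values here as illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include "nnacl/fp32/one_hot_fp32.h"

int main(void) {
  int indices[4] = {1, 0, 9, 3};
  float output[4 * 10] = {0};  /* zero-initialized, matching off_value_ */
  OneHotParameter param;
  memset(&param, 0, sizeof(param));
  param.depth_ = 10;
  param.axis_ = -1;            /* insert the depth dimension last */
  param.on_value_ = 1.0f;
  param.off_value_ = 0.0f;
  param.outer_size_ = 4;
  param.inner_size_ = 1;
  param.support_neg_index_ = false;
  int ret = OneHot(indices, output, &param, 0, 1);  /* single thread: tid 0 of 1 */
  printf("ret=%d output[1]=%f\n", ret, output[1]);  /* expect 1.0 */
  return ret;
}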
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_PACK_H_ +#define MINDSPORE_LITE_NNACL_FP32_PACK_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void PackHWCToWHC(const float *src, float *dst, int height, int width, int channel); +void PackNHWCToNC4HW4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNCHWToNC4HW4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNHWC8Fp32(const void *src, void *dst, int batch, int plane, int channel); +// Note: If not multithreaded, please set task_id = 0 and thread_count = 0; +void PackNHWCToNCHWFp32(const void *src, void *dst, int batch, int plane, int channel, int task_id, int thread_count); +void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel, int task_id, int thread_count); +void PackNHWC4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNC4HW4ToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNC4HW4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToC8HWN8Fp32(const void *src, void *dst, int batch, int plane, int channel); + +void PackWeightKHWToHWKFp32(const void *src, void *dst, int plane, int channel); +void PackDepthwiseIndirectWeightC4Fp32(const void *src, void *dst, int height, int width, int channel); +void PackDepthwiseIndirectWeightC8Fp32(const void *src, void *dst, int height, int width, int channel); +void Im2ColPackUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input, int real_cal_num, + int block_index); + +#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX)) +void PackWeightConvDw3x3Fp32(const void *src, void *dst, int channel); +#endif + +// Transpose 8X8 Fp32 block data +typedef void (*Transpose8X8Fp32Func)(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#ifdef ENABLE_ARM64 +void Transpose8X8Fp32Arm64(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif +#ifdef ENABLE_ARM32 +void Transpose8X8Fp32Arm32(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif +#ifdef ENABLE_AVX +void Transpose8X8Fp32Avx(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif +#if defined(ENABLE_SSE) && !defined(ENABLE_AVX) +void Transpose8X8Fp32Sse(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_PAD_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pad_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pad_fp32.h new file mode 100755 index 00000000000..db5876f51a8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pad_fp32.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
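A small usage sketch for the layout converters in pack_fp32.h: repacking a 1x2x2x3 NHWC tensor into NCHW. Per the note in the header, a single-threaded caller passes task_id = 0 and thread_count = 0; plane is H*W.

#include <stdio.h>
#include "nnacl/fp32/pack_fp32.h"

int main(void) {
  const int batch = 1, plane = 2 * 2, channel = 3;
  float nhwc[1 * 2 * 2 * 3];
  float nchw[1 * 3 * 2 * 2];
  for (int i = 0; i < batch * plane * channel; ++i) {
    nhwc[i] = (float)i;
  }
  PackNHWCToNCHWFp32(nhwc, nchw, batch, plane, channel, 0, 0);
  /* channel 1 of NCHW now holds nhwc[1], nhwc[4], nhwc[7], nhwc[10] */
  printf("nchw[4]=%f\n", nchw[4]);
  return 0;
}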
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_PAD_H_ +#define MINDSPORE_LITE_NNACL_FP32_PAD_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include +#include +#include "nnacl/op_base.h" +#include "nnacl/pad_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Pad(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *paddings, int tid, int thread_num); +void MirrorPad(const float *input_data, float *output_data, const int *input_shape, const PadParameter *pad_param, + int begin, int end); + +int TransOut2InputDimIndex(int out_dim_index, int left_pad, int in_dim, int offset); +int GetInputFlattenIndex(int out_flatten_index, const int *input_shape, const PadParameter *pad_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_PAD_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pooling_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pooling_fp32.h new file mode 100755 index 00000000000..71033e5072f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/pooling_fp32.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_POOLING_H_ +#define MINDSPORE_LITE_NNACL_FP32_POOLING_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/pooling_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int AvgPooling(const float *input_ptr, float *output_ptr, const PoolingParameter *pooling_param, int task_id, + float minf, float maxf); +void MaxPooling(const float *input_ptr, float *output_ptr, const PoolingParameter *pooling_param, int task_id, + float minf, float maxf); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_POOLING_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/power_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/power_fp32.h new file mode 100755 index 00000000000..8b62fee9ecc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/power_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
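A usage sketch for Pad from pad_fp32.h: zero-padding a 1x2x2x1 NHWC tensor by one pixel on each spatial side. The paddings layout (a before/after pair per dimension, eight ints for 4D) follows the usual nnacl convention and is stated as an assumption; the output is pre-filled with the constant pad value, which the generated wrapper normally does.

#include <stdio.h>
#include <string.h>
#include "nnacl/fp32/pad_fp32.h"

int main(void) {
  float input[1 * 2 * 2 * 1] = {1.0f, 2.0f, 3.0f, 4.0f};
  float output[1 * 4 * 4 * 1];
  memset(output, 0, sizeof(output));            /* constant pad value 0 */
  int in_shape[4] = {1, 2, 2, 1};
  int out_shape[4] = {1, 4, 4, 1};
  int paddings[8] = {0, 0, 1, 1, 1, 1, 0, 0};   /* {before, after} for N, H, W, C */
  Pad(input, output, in_shape, out_shape, paddings, 0, 1);  /* tid 0 of 1 thread */
  printf("output[5]=%f\n", output[5]);          /* expect 1.0 (top-left input value) */
  return 0;
}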
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_POWER_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_POWER_FP32_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/power_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Power(const float *input, const float *exponent, float *output, int len, float scale, float shift, bool broadcast); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_POWER_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prelu_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prelu_fp32.h new file mode 100755 index 00000000000..f9d16a26673 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prelu_fp32.h @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
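An independent scalar sketch of the computation the Power declaration above suggests: each element is scaled and shifted before exponentiation, and `broadcast` is taken to mean a single shared exponent. This mirrors the expected operator semantics and is not a copy of the nnacl implementation.

#include <math.h>
#include <stdbool.h>

static void RefPower(const float *input, const float *exponent, float *output,
                     int len, float scale, float shift, bool broadcast) {
  for (int i = 0; i < len; ++i) {
    float e = broadcast ? exponent[0] : exponent[i];
    output[i] = powf(scale * input[i] + shift, e);
  }
}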
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_PRELU_H_ +#define MINDSPORE_LITE_NNACL_FP32_PRELU_H_ + +#include "nnacl/op_base.h" +#include "nnacl/prelu_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PRelu(float *input, float *output, const PReluParameter *prelu_param_, int task_id); + +void PReluShareChannel(float *input, float *output, const PReluParameter *prelu_param_, int task_id); +#ifdef __cplusplus +} +#endif + +#define PRELU_CALCULATE_256X8(dst, src) \ + MS_FLOAT32X8 dst##1 = \ + MS_MUL256_F32(src##1, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##1, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##2 = \ + MS_MUL256_F32(src##2, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##2, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##3 = \ + MS_MUL256_F32(src##3, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##3, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##4 = \ + MS_MUL256_F32(src##4, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##4, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##5 = \ + MS_MUL256_F32(src##5, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##5, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##6 = \ + MS_MUL256_F32(src##6, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##6, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##7 = \ + MS_MUL256_F32(src##7, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##7, zero_value_8, 30))); \ + MS_FLOAT32X8 dst##8 = \ + MS_MUL256_F32(src##8, MS_BLEND256_F32(slope_value_8, one_value_8, MS_CMP256_F32(src##8, zero_value_8, 30))); + +#define PRELU_CALCULATE_128X8(dst, src) \ + MS_FLOAT32X4 dst##1 = MS_MULQ_F32(src##1, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##1, zero_value))); \ + MS_FLOAT32X4 dst##2 = MS_MULQ_F32(src##2, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##2, zero_value))); \ + MS_FLOAT32X4 dst##3 = MS_MULQ_F32(src##3, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##3, zero_value))); \ + MS_FLOAT32X4 dst##4 = MS_MULQ_F32(src##4, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##4, zero_value))); \ + MS_FLOAT32X4 dst##5 = MS_MULQ_F32(src##5, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##5, zero_value))); \ + MS_FLOAT32X4 dst##6 = MS_MULQ_F32(src##6, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##6, zero_value))); \ + MS_FLOAT32X4 dst##7 = MS_MULQ_F32(src##7, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##7, zero_value))); \ + MS_FLOAT32X4 dst##8 = MS_MULQ_F32(src##8, MS_BLENDQ_F32(slope_value, one_value, MS_CMPGTQ_F32(src##8, zero_value))); + +#endif // MINDSPORE_LITE_NNACL_FP32_PRELU_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prior_box_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prior_box_fp32.h new file mode 100755 index 00000000000..d37ca65956b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/prior_box_fp32.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
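A scalar reference of the PReLU computation that the declarations and SIMD macros above vectorize: positive inputs pass through, negative inputs are scaled by a per-channel slope (PReluShareChannel uses one shared slope for every channel). The NHWC layout with channel innermost is assumed here, matching the other fp32 kernels.

static void RefPRelu(const float *input, float *output, const float *slope,
                     int plane, int channel) {
  for (int p = 0; p < plane; ++p) {
    for (int c = 0; c < channel; ++c) {
      float x = input[p * channel + c];
      output[p * channel + c] = x > 0.0f ? x : slope[c] * x;
    }
  }
}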
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_PRIOR_BOX_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_PRIOR_BOX_FP32_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/prior_box_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +static int PriorBox(const float *input_data, float *output_data, const size_t size, const int tid, + const int thread_num) { + size_t unit_size = size / thread_num; + size_t copy_size = (tid == thread_num - 1) ? size - unit_size * tid : unit_size; + (void)memcpy(output_data + tid * unit_size, input_data + tid * unit_size, copy_size * sizeof(float)); + return NNACL_OK; +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_PRIOR_BOX_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/range_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/range_fp32.h new file mode 100755 index 00000000000..dfa49af0bea --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/range_fp32.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_RANGE_H_ +#define MINDSPORE_LITE_NNACL_RANGE_H_ + +#include "nnacl/op_base.h" + +typedef struct RangeParameter { + // Primitive parameter + OpParameter op_parameter_; + int dType_; + int start_; + int limit_; + int delta_; +} RangeParameter; + +#ifdef __cplusplus +extern "C" { +#endif +inline void Range(float *output_ptr, float start, float delta, int nums) { + for (int i = 0; i < nums; ++i, start += delta) { + output_ptr[i] = start; + } +} + +inline void RangeInt(int *output_ptr, int start, int delta, int nums) { + for (int i = 0; i < nums; ++i, start += delta) { + output_ptr[i] = start; + } +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_RANGE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/rank_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/rank_fp32.h new file mode 100755 index 00000000000..7c132c43467 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/rank_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
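A minimal usage sketch for the inline Range helper defined above: fill a buffer with an arithmetic sequence. This assumes the toolchain inlines the header-defined helpers, as the generated micro code does.

#include <stdio.h>
#include "nnacl/fp32/range_fp32.h"

int main(void) {
  float values[8];
  Range(values, 0.0f, 0.5f, 8);   /* 0.0, 0.5, 1.0, ..., 3.5 */
  for (int i = 0; i < 8; ++i) {
    printf("%f ", values[i]);
  }
  printf("\n");
  return 0;
}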
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RANK_H_ +#define MINDSPORE_LITE_NNACL_RANK_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +inline void Rank(float *output, int rank) { + output[0] = (float)(rank); + return; +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_RANK_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reduce_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reduce_fp32.h new file mode 100755 index 00000000000..30901f06226 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reduce_fp32.h @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_REDUCE_H_ +#define MINDSPORE_LITE_NNACL_FP32_REDUCE_H_ +#include "nnacl/op_base.h" +#include "nnacl/reduce_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ReduceMean(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceMean(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceSum(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceSum(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceMax(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceMax(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceMin(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceMin(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceProd(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceProd(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceSumSquare(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int ReduceAll(int outer_size, int inner_size, int axis_size, const bool *src_data, bool *dst_data, int tid, + int thread_num); + +#ifdef 
ENABLE_NNACL_INFER_SHAPE +int ReduceInferShape(int **in_shape, size_t *dim_size, int *out_shape, int *in_format, int *out_format, + int *in_datatype, int *out_datatype, OpParameter *param); +#endif +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_REDUCE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/resize_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/resize_fp32.h new file mode 100755 index 00000000000..4e71d1639ed --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/resize_fp32.h @@ -0,0 +1,71 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_RESIZE_H_ +#define MINDSPORE_LITE_NNACL_FP32_RESIZE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef float (*CalculateOriginalCoordinate)(int x_resized, int length_original, int length_resized); + +int PrepareResizeBilinear(const int *input_shape, const int *output_shape, CalculateOriginalCoordinate calculate, + int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + float *x_left_weights); + +int PrepareResizeBicubic(const int *input_shape, const int *output_shape, CalculateOriginalCoordinate calculate, + int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + float *x_left_weights, float cubic_coeff); + +int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, + const float *y_bottom_weights, const float *x_left_weights, float *line0, float *line1, + const int h_begin, const int h_end); + +int ResizeBicubic(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, + const float *y_bottom_weights, const float *x_left_weights, float *line_buffer, const int h_begin, + const int h_end); + +int PrepareCropAndResizeBilinear(const int *input_shape, const float *boxes, const int *box_idx, + const int *output_shape, int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, + float *y_bottom_weights, float *x_left_weights); + +int CropAndResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, + const float *y_bottom_weights, const float *x_left_weights, float *line0, float *line1, + const int h_begin, const int h_end); + +int ResizeNearestNeighbor(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + CalculateOriginalCoordinate calculate, int coordinate_transform_mode, int tid, + int thread_num); + +float 
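For orientation, an independent sketch of how the (outer_size, inner_size, axis_size) arguments of the Reduce* kernels declared in reduce_fp32.h above describe a single-axis reduction: for a {2, 3, 4} tensor reduced over axis 1, outer_size = 2, axis_size = 3 and inner_size = 4. The indexing below mirrors that convention; it is a reference loop, not the nnacl kernel.

static void RefReduceSum(int outer_size, int inner_size, int axis_size,
                         const float *src, float *dst) {
  for (int o = 0; o < outer_size; ++o) {
    for (int i = 0; i < inner_size; ++i) {
      float sum = 0.0f;
      for (int a = 0; a < axis_size; ++a) {
        sum += src[(o * axis_size + a) * inner_size + i];
      }
      dst[o * inner_size + i] = sum;
    }
  }
}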
CalculateAsymmetric(int x_resized, int length_original, int length_resized); + +float CalculateAlignCorners(int x_resized, int length_original, int length_resized); + +float CalculateHalfPixel(int x_resized, int length_original, int length_resized); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_RESIZE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_fp32.h new file mode 100755 index 00000000000..d8e58d17bd2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_fp32.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_REVERSE_H_ +#define MINDSPORE_LITE_NNACL_REVERSE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#define REVERSE_SHAPE_MAX_SIZE 4 + +// For reverse. +typedef struct ReverseParameter { + OpParameter op_parameter_; + int axis_[REVERSE_SHAPE_MAX_SIZE]; + int num_axis_; +} ReverseParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Reverse(const float *input, float *output, size_t elem_size, int *index); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_REVERSE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_sequence_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_sequence_fp32.h new file mode 100755 index 00000000000..d0b18c39dd9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/reverse_sequence_fp32.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
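The three coordinate-transform helpers declared at the end of resize_fp32.h above normally follow the standard resize conventions; the formulas below are those common definitions, given as an assumption rather than a copy of the nnacl code.

static float RefAsymmetric(int x_resized, int length_original, int length_resized) {
  return (float)x_resized * (float)length_original / (float)length_resized;
}

static float RefAlignCorners(int x_resized, int length_original, int length_resized) {
  if (length_resized <= 1) {
    return 0.0f;
  }
  return (float)x_resized * (float)(length_original - 1) / (float)(length_resized - 1);
}

static float RefHalfPixel(int x_resized, int length_original, int length_resized) {
  return ((float)x_resized + 0.5f) * (float)length_original / (float)length_resized - 0.5f;
}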
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_REVERSE_SEQUENCE_H_ +#define MINDSPORE_LITE_NNACL_FP32_REVERSE_SEQUENCE_H_ + +#include +#include "nnacl/common_func.h" +#include "nnacl/op_base.h" +#include "nnacl/reverse_sequence_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ReverseSequence(float *input0, const void *input1, float *output, ReverseSequenceParameter *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_REVERSE_SEQUENCE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/roi_pooling_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/roi_pooling_fp32.h new file mode 100755 index 00000000000..7e13f3d2180 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/roi_pooling_fp32.h @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_ROI_POOLING_H_ +#define MINDSPORE_LITE_NNACL_FP32_ROI_POOLING_H_ + +#include "nnacl/op_base.h" + +typedef struct ROIPoolingParameter { + // primitive parameter + OpParameter op_parameter_; + int pooledW_; + int pooledH_; + float scale_; + + // shape correlative + int in_strides_[DIMENSION_4D]; + int out_strides_[DIMENSION_4D]; + int ndim_; + int input_w_; + int input_h_; + int input_n_; + int input_c_; + int output_w_; + int output_h_; + int output_n_; + int output_c_; + + // other parameter + int thread_num_; +} ROIPoolingParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int ROIPooling(const float *in_ptr, float *out_ptr, const float *roi, float *max_c, int tid, + const ROIPoolingParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_ROI_POOLING_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scale_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scale_fp32.h new file mode 100755 index 00000000000..bc1df65cb3b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scale_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SCALE_FP32_H_ +#define MINDSPORE_LITE_NNACL_SCALE_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/scale.h" +#ifdef __cplusplus +extern "C" { +#endif +void DoScale(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, + const ScaleParameter *scale_param); +void DoScaleRelu(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, + const ScaleParameter *scale_param); +void DoScaleRelu6(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, + const ScaleParameter *scale_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_SCALE_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scatter_nd_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scatter_nd_fp32.h new file mode 100755 index 00000000000..fcae3a8d310 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/scatter_nd_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_SCATTER_ND_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_SCATTER_ND_FP32_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoScatterND(float *output_ptr, const float *update, int *output_unit_offsets, int unit_size, int num_units); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_SCATTER_ND_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/softmax_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/softmax_fp32.h new file mode 100755 index 00000000000..553b79ba6dc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/softmax_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
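A sketch of the scatter step that DoScatterND's signature implies: each of num_units slices of `update`, unit_size floats long, is copied into the output at a precomputed offset. Offsets are assumed to be in elements rather than bytes; the real kernel may also validate its inputs.

#include <string.h>

static int RefScatterND(float *output_ptr, const float *update,
                        const int *output_unit_offsets, int unit_size, int num_units) {
  for (int i = 0; i < num_units; ++i) {
    memcpy(output_ptr + output_unit_offsets[i], update + i * unit_size,
           (size_t)unit_size * sizeof(float));
  }
  return 0;  /* NNACL_OK */
}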
+ */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_SOFTMAX_H_ +#define MINDSPORE_LITE_NNACL_FP32_SOFTMAX_H_ + +#include "nnacl/op_base.h" +#include "nnacl/softmax_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif +void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, const SoftmaxParameter *parameter); +void SoftmaxLastAxis(const float *src, float *dst, int batch, int channel); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_SOFTMAX_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/space_to_batch_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/space_to_batch_fp32.h new file mode 100755 index 00000000000..84aa1e40072 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/space_to_batch_fp32.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_FP32_SPACE_TO_BATCH_H_ +#define MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_FP32_SPACE_TO_BATCH_H_ + +#include +#include "nnacl/op_base.h" + +typedef struct SpaceToBatchParameter { + // primitive parameter + OpParameter op_parameter_; + int block_sizes_[4]; + int paddings_[4]; + + // shape correlative + int input_shape_[4]; + int output_shape_[4]; + int in_stride_[4]; + int out_stride_[4]; + int padded_in_shape_[4]; + + // other parameter + bool need_paddings_; + int m_; +} SpaceToBatchParameter; +#ifdef __cplusplus +extern "C" { +#endif + +void DoSpaceToBatch(const float *input, float *output, const int *in_shape, const int *out_shape, const int *in_stride, + const int *out_stride, const int *blocks, const int *paddings, int thread, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_FP32_SPACE_TO_BATCH_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sparse_to_dense_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sparse_to_dense_fp32.h new file mode 100755 index 00000000000..2be2695bc5b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sparse_to_dense_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
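A scalar reference of the last-axis softmax that SoftmaxLastAxis above declares: for each of `batch` rows of `channel` values, subtract the row maximum for numerical stability, exponentiate and normalize. This is an independent sketch of the expected semantics, not the nnacl kernel.

#include <math.h>

static void RefSoftmaxLastAxis(const float *src, float *dst, int batch, int channel) {
  for (int b = 0; b < batch; ++b) {
    const float *row = src + b * channel;
    float *out = dst + b * channel;
    float max_v = row[0];
    for (int c = 1; c < channel; ++c) {
      if (row[c] > max_v) max_v = row[c];
    }
    float sum = 0.0f;
    for (int c = 0; c < channel; ++c) {
      out[c] = expf(row[c] - max_v);
      sum += out[c];
    }
    for (int c = 0; c < channel; ++c) {
      out[c] /= sum;
    }
  }
}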
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_SPARSETODENSE_H_ +#define MINDSPORE_LITE_NNACL_FP32_SPARSETODENSE_H_ + +#include "nnacl/sparse_to_dense_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void SparseToDense(int **sparse_indices_vect, const int *output_shape, const float *sparse_values, float default_value, + float *output, bool isScalar, int index_start, int index_end, int out_width); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_SPARSETODENSE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/splice_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/splice_fp32.h new file mode 100755 index 00000000000..42db661670b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/splice_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_ +#include +#include "nnacl/splice_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif + +void SpliceFp32(const float *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, + float *dst_data, int dst_row, int dst_col); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/squared_difference.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/squared_difference.h new file mode 100755 index 00000000000..71098b55253 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/squared_difference.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ +#define MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Element Squared Difference */ +int ElementSquaredDifference(const float *in0, const float *in1, float *out, int size); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/strided_slice_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/strided_slice_fp32.h new file mode 100755 index 00000000000..12a0c31779c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/strided_slice_fp32.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FP32_STRIDED_SLICE_FP32_H_ +#define MINDSPORE_LITE_NNACL_FP32_STRIDED_SLICE_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoStridedSlice(const void *inputs, void *output, StridedSliceParameter *param); + +void FastStride(const uint8_t *input, uint8_t *output, int split_len, int stride, size_t outer, size_t inner_size, + size_t in_offset); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_STRIDED_SLICE_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sub_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sub_fp32.h new file mode 100755 index 00000000000..b8464171908 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/sub_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
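A minimal call of the squared-difference kernel declared in squared_difference.h above, computing out[i] = (in0[i] - in1[i])^2 over `size` elements; buffer contents are illustrative.

#include <stdio.h>
#include "nnacl/fp32/squared_difference.h"

int main(void) {
  float in0[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float in1[4] = {0.5f, 2.0f, 1.0f, 6.0f};
  float out[4];
  int ret = ElementSquaredDifference(in0, in1, out, 4);
  printf("ret=%d out[3]=%f\n", ret, out[3]);  /* expect 4.0 */
  return ret;
}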
+ */ +#ifndef MINDSPORE_LITE_NNACL_SUB_FP32_H_ +#define MINDSPORE_LITE_NNACL_SUB_FP32_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ElementSub(const float *in0, const float *in1, float *out, int size); +int ElementSubInt(const int *in0, const int *in1, int *out, int size); +int ElementSubRelu(const float *in0, const float *in1, float *out, int size); +int ElementSubRelu6(const float *in0, const float *in1, float *out, int size); +int ElementOptSub(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptSubRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptSubRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptSubInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_SUB_FP32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/topk_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/topk_fp32.h new file mode 100755 index 00000000000..64bfd2a242d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/topk_fp32.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_TOPK_H_ +#define MINDSPORE_LITE_NNACL_TOPK_H_ + +#include "nnacl/op_base.h" + +typedef struct TopkNode { + float element; + int32_t index; +} TopkNode; + +typedef struct TopkParameter { + // primitive parameter + OpParameter op_parameter_; + int k_; + bool sorted_; + + // other parameter + int last_dim_size_; + int loop_num_; + void *topk_node_list_; +} TopkParameter; + +#ifdef __cplusplus +extern "C" { +#endif +void Topk(float *input_data, float *output_data, int32_t *output_index, TopkParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_TOPK_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/transpose_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/transpose_fp32.h new file mode 100755 index 00000000000..b3e6a5fb3ba --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/transpose_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
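A minimal usage sketch for the subtraction kernels declared in sub_fp32.h above; the ElementOptSub* variants take an ArithmeticParameter describing which operand is a broadcast scalar, while the plain call below needs no parameter.

#include <stdio.h>
#include "nnacl/fp32/sub_fp32.h"

int main(void) {
  float in0[4] = {5.0f, 4.0f, 3.0f, 2.0f};
  float in1[4] = {1.0f, 1.0f, 1.0f, 1.0f};
  float out[4];
  int ret = ElementSub(in0, in1, out, 4);     /* out[i] = in0[i] - in1[i] */
  printf("ret=%d out[0]=%f\n", ret, out[0]);  /* expect 4.0 */
  return ret;
}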
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_FP32_TRANSPOSE_H_ +#define MINDSPORE_LITE_NNACL_FP32_TRANSPOSE_H_ + +#include +#include "nnacl/transpose.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DoTransposeFp32(const float *in_data, float *out_data, const int *output_shape, TransposeParameter *param); +void TransposeDimsFp32(const float *in_data, float *out_data, const int *output_shape, int *size, int *position, + TransposeParameter *transpose_param, int task_id, int thread_num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_TRANSPOSE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/unique_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/unique_fp32.h new file mode 100755 index 00000000000..95b89350c16 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/unique_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_UNIQUE_H +#define MINDSPORE_LITE_NNACL_UNIQUE_H + +#include "nnacl/op_base.h" + +typedef struct UniqueParameter { + // primitive parameter + OpParameter op_parameter_; +} UniqueParameter; + +#ifdef __cplusplus +extern "C" { +#endif +void Unique(const float *input, int input_len, float *output0, int *output0_len, int *output1); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_UNIQUE_H diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/where_fp32.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/where_fp32.h new file mode 100755 index 00000000000..70f7d44217f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/where_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
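A hedged usage sketch for Unique as declared in unique_fp32.h above, following the usual Unique operator contract: output0 receives the distinct values in order of first appearance, *output0_len their count, and output1 an index into output0 for every input element. That contract is assumed here rather than quoted from the implementation.

#include <stdio.h>
#include "nnacl/fp32/unique_fp32.h"

int main(void) {
  float input[6] = {1.0f, 1.0f, 2.0f, 4.0f, 4.0f, 4.0f};
  float output0[6];
  int output1[6];
  int output0_len = 0;
  Unique(input, 6, output0, &output0_len, output1);
  printf("distinct=%d first=%f\n", output0_len, output0[0]);
  return 0;
}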
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP32_WHERE_Fp32_H_ +#define MINDSPORE_LITE_NNACL_FP32_WHERE_Fp32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/where_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void WhereWithTripleInputs(const bool *condition, const float *x, const float *y, float *output, + WhereParameter *where_param_, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP32_WHERE_Fp32_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_transform.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_transform.h new file mode 100755 index 00000000000..58da6822157 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_transform.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_WINOGRAD_TRANSFORM_H_ +#define MINDSPORE_LITE_NNACL_WINOGRAD_TRANSFORM_H_ + +#ifdef ENABLE_ARM +#include +#endif +#include +#include "nnacl/pack.h" +#include "nnacl/fp32/winograd_utils.h" + +#ifdef __cplusplus +extern "C" { +#endif +// for fp32 winograd input/output transform +void WinogradInputTransform(const float *input_data, float *trans_input, float *tmp_data, int cal_num, + int out_tile_index, int out_w_block_num, const ConvParameter *conv_param, + InputTransFunc func); + +void WinogradOutputTransform(const float *gemm_out, float *out_data, const float *bias_data, int cal_num, + int out_tile_index, int output_unit_num, const ConvParameter *conv_param, + OutputTransFunc func); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_WINOGRAD_TRANSFORM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_utils.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_utils.h new file mode 100755 index 00000000000..22ed181a9a7 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/fp32/winograd_utils.h @@ -0,0 +1,316 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
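A scalar sketch of the selection WhereWithTripleInputs performs when condition, x and y all have the same length: out[i] = condition[i] ? x[i] : y[i]. Broadcasting of shorter inputs is what WhereParameter describes and is deliberately left out of this reference.

#include <stdbool.h>

static void RefWhere(const bool *condition, const float *x, const float *y,
                     float *output, int size) {
  for (int i = 0; i < size; ++i) {
    output[i] = condition[i] ? x[i] : y[i];
  }
}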
+ */ + +#ifndef MINDSPORE_LITE_NNACL_WINOGRAD_UTILS_H_ +#define MINDSPORE_LITE_NNACL_WINOGRAD_UTILS_H_ + +#ifdef ENABLE_ARM +#include +#endif +#include "nnacl/conv_parameter.h" +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt, + int src_step, int dst_step, int in_unit); + +void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a, + const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit); + +#define Load16Data \ + src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ + src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ + src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ + src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ + src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ + src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ + src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ + src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ + src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ + src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ + src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ + src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ + src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ + src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ + src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ + src[15] = MS_LDQ_F32(src_data + 15 * src_step); + +#define Load36Data \ + src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ + src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ + src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ + src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ + src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ + src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ + src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ + src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ + src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ + src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ + src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ + src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ + src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ + src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ + src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ + src[15] = MS_LDQ_F32(src_data + 15 * src_step); \ + src[16] = MS_LDQ_F32(src_data + 16 * src_step); \ + src[17] = MS_LDQ_F32(src_data + 17 * src_step); \ + src[18] = MS_LDQ_F32(src_data + 18 * src_step); \ + src[19] = MS_LDQ_F32(src_data + 19 * src_step); \ + src[20] = MS_LDQ_F32(src_data + 20 * src_step); \ + src[21] = MS_LDQ_F32(src_data + 21 * src_step); \ + src[22] = MS_LDQ_F32(src_data + 22 * src_step); \ + src[23] = MS_LDQ_F32(src_data + 23 * src_step); \ + src[24] = MS_LDQ_F32(src_data + 24 * src_step); \ + src[25] = MS_LDQ_F32(src_data + 25 * src_step); \ + src[26] = MS_LDQ_F32(src_data + 26 * src_step); \ + src[27] = MS_LDQ_F32(src_data + 27 * src_step); \ + src[28] = MS_LDQ_F32(src_data + 28 * src_step); \ + src[29] = MS_LDQ_F32(src_data + 29 * src_step); \ + src[30] = MS_LDQ_F32(src_data + 30 * src_step); \ + src[31] = MS_LDQ_F32(src_data + 31 * src_step); \ + src[32] = MS_LDQ_F32(src_data + 32 * src_step); \ + src[33] = MS_LDQ_F32(src_data + 33 * src_step); \ + src[34] = MS_LDQ_F32(src_data + 34 * src_step); 
\ + src[35] = MS_LDQ_F32(src_data + 35 * src_step); + +#define Load64Data \ + src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ + src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ + src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ + src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ + src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ + src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ + src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ + src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ + src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ + src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ + src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ + src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ + src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ + src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ + src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ + src[15] = MS_LDQ_F32(src_data + 15 * src_step); \ + src[16] = MS_LDQ_F32(src_data + 16 * src_step); \ + src[17] = MS_LDQ_F32(src_data + 17 * src_step); \ + src[18] = MS_LDQ_F32(src_data + 18 * src_step); \ + src[19] = MS_LDQ_F32(src_data + 19 * src_step); \ + src[20] = MS_LDQ_F32(src_data + 20 * src_step); \ + src[21] = MS_LDQ_F32(src_data + 21 * src_step); \ + src[22] = MS_LDQ_F32(src_data + 22 * src_step); \ + src[23] = MS_LDQ_F32(src_data + 23 * src_step); \ + src[24] = MS_LDQ_F32(src_data + 24 * src_step); \ + src[25] = MS_LDQ_F32(src_data + 25 * src_step); \ + src[26] = MS_LDQ_F32(src_data + 26 * src_step); \ + src[27] = MS_LDQ_F32(src_data + 27 * src_step); \ + src[28] = MS_LDQ_F32(src_data + 28 * src_step); \ + src[29] = MS_LDQ_F32(src_data + 29 * src_step); \ + src[30] = MS_LDQ_F32(src_data + 30 * src_step); \ + src[31] = MS_LDQ_F32(src_data + 31 * src_step); \ + src[32] = MS_LDQ_F32(src_data + 32 * src_step); \ + src[33] = MS_LDQ_F32(src_data + 33 * src_step); \ + src[34] = MS_LDQ_F32(src_data + 34 * src_step); \ + src[35] = MS_LDQ_F32(src_data + 35 * src_step); \ + src[36] = MS_LDQ_F32(src_data + 36 * src_step); \ + src[37] = MS_LDQ_F32(src_data + 37 * src_step); \ + src[38] = MS_LDQ_F32(src_data + 38 * src_step); \ + src[39] = MS_LDQ_F32(src_data + 39 * src_step); \ + src[40] = MS_LDQ_F32(src_data + 40 * src_step); \ + src[41] = MS_LDQ_F32(src_data + 41 * src_step); \ + src[42] = MS_LDQ_F32(src_data + 42 * src_step); \ + src[43] = MS_LDQ_F32(src_data + 43 * src_step); \ + src[44] = MS_LDQ_F32(src_data + 44 * src_step); \ + src[45] = MS_LDQ_F32(src_data + 45 * src_step); \ + src[46] = MS_LDQ_F32(src_data + 46 * src_step); \ + src[47] = MS_LDQ_F32(src_data + 47 * src_step); \ + src[48] = MS_LDQ_F32(src_data + 48 * src_step); \ + src[49] = MS_LDQ_F32(src_data + 49 * src_step); \ + src[50] = MS_LDQ_F32(src_data + 50 * src_step); \ + src[51] = MS_LDQ_F32(src_data + 51 * src_step); \ + src[52] = MS_LDQ_F32(src_data + 52 * src_step); \ + src[53] = MS_LDQ_F32(src_data + 53 * src_step); \ + src[54] = MS_LDQ_F32(src_data + 54 * src_step); \ + src[55] = MS_LDQ_F32(src_data + 55 * src_step); \ + src[56] = MS_LDQ_F32(src_data + 56 * src_step); \ + src[57] = MS_LDQ_F32(src_data + 57 * src_step); \ + src[58] = MS_LDQ_F32(src_data + 58 * src_step); \ + src[59] = MS_LDQ_F32(src_data + 59 * src_step); \ + src[60] = MS_LDQ_F32(src_data + 60 * src_step); \ + src[61] = MS_LDQ_F32(src_data + 61 * src_step); \ + src[62] = MS_LDQ_F32(src_data + 62 * src_step); \ + src[63] = MS_LDQ_F32(src_data + 63 * src_step); + +InputTransFunc GetInputTransFunc(int input_unit); + +void InputTransform4x4Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +void 
InputTransform6x6Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +void InputTransform8x8Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +OutputTransFunc GetOutputTransFunc(int input_unit, int output_unit, ActType act_type); + +#define Store4Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[2]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[3]); + +#define Store9Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[3]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[4]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[6]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); + +#define Store16Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ + MS_STQ_F32(dst_data + 3 * out_c, m[3]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[4]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[5]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[8]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c, m[12]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); + +#define Store25Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ + MS_STQ_F32(dst_data + 3 * out_c, m[3]); \ + MS_STQ_F32(dst_data + 4 * out_c, m[4]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[5]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[6]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[10]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 4 * out_c, m[14]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c, m[15]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + out_c, m[16]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c, m[20]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); + +void OutputTransform4x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, 
+ int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +void OutputTransform6x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +void OutputTransform8x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3ReluUnit(const float 
*src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +int SelectOutputUnit(ConvParameter *conv_param); + +bool CheckIfUseWinograd(int *output_unit, ConvParameter *conv_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_WINOGRAD_UTILS_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gather_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gather_parameter.h new file mode 100755 index 00000000000..6ac16dcaabc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gather_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
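A minimal usage sketch (not taken from this patch) of how a convolution kernel could consult the Winograd helpers declared above: check applicability, then fetch the matching input/output transform functions. The include path, the ConvParameter field names (kernel_w_) and ActType_Relu are assumptions from the surrounding nnacl headers; the tiling and GEMM stages are omitted.

#include <stdbool.h>
#include "nnacl/conv_parameter.h"
#include "nnacl/fp32/winograd_utils.h" /* path is an assumption */

/* Returns true when the Winograd path applies and fills the transform hooks. */
static bool PrepareWinogradSketch(ConvParameter *conv_param, InputTransFunc *in_func,
                                  OutputTransFunc *out_func) {
  int output_unit = 0;
  if (!CheckIfUseWinograd(&output_unit, conv_param)) {
    return false;  /* fall back to the plain convolution kernels */
  }
  /* For F(m, r) the transformed tile is (m + r - 1) x (m + r - 1). */
  int input_unit = output_unit + conv_param->kernel_w_ - 1;
  *in_func = GetInputTransFunc(input_unit);                        /* e.g. InputTransform6x6Unit */
  *out_func = GetOutputTransFunc(input_unit, output_unit, ActType_Relu);
  return *in_func != NULL && *out_func != NULL;
}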
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_GATHER_PARAMETER_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_GATHER_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct GatherParameter { + // Primitive parameter + OpParameter op_parameter_; + int axis_; +} GatherParameter; + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_GATHER_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gelu_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gelu_parameter.h new file mode 100755 index 00000000000..7a16900665c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gelu_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_GELU_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_GELU_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct GeLUParameter { + // Primitive parameter + OpParameter op_parameter_; + bool approximate_; +} GeLUParameter; + +#endif // MINDSPORE_LITE_NNACL_GELU_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gru_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gru_parameter.h new file mode 100755 index 00000000000..29ebdbdc128 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/gru_parameter.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
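A minimal sketch (not part of the patch) of how one of these parameter structs is populated: they are plain C structs whose first member is the shared OpParameter, so callers fill them field by field. The thread_num_ member of OpParameter is an assumption based on nnacl/op_base.h.

#include "nnacl/gather_parameter.h"

static GatherParameter BuildGatherParamSketch(int axis, int thread_num) {
  GatherParameter param = {0};                 /* zero-initializes the embedded OpParameter too */
  param.op_parameter_.thread_num_ = thread_num;
  param.axis_ = axis;                          /* dimension to gather along */
  return param;
}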
+ */ +#ifndef MINDSPORE_LITE_NNACL_GRU_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_GRU_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct GruParameter { + // Primitive parameter + OpParameter op_parameter_; + // shape correlative + int input_size_; + int hidden_size_; // output_size + int seq_len_; + int batch_; + // other parameter + int input_step_; + int output_step_; + bool bidirectional_; + int col_align_; + int row_align_; +} GruParameter; + +#endif // MINDSPORE_LITE_NNACL_GRU_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/instance_norm_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/instance_norm_parameter.h new file mode 100755 index 00000000000..988561a3e6d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/instance_norm_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INSTANCE_NORM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_INSTANCE_NORM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct InstanceNormParameter { + // Primitive parameter + OpParameter op_parameter_; + float epsilon_; + // shape correlative + int batch_; + int channel_; + int inner_size_; +} InstanceNormParameter; + +#endif // MINDSPORE_LITE_NNACL_INSTANCE_NORM_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/add_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/add_int8.h new file mode 100755 index 00000000000..8663d79a679 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/add_int8.h @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
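An illustrative sketch (an assumption, not shown in the patch) of how the shape-correlative fields of InstanceNormParameter could be derived from an NHWC activation shape: every (n, c) slice of H * W elements is normalized independently.

#include "nnacl/instance_norm_parameter.h"

/* shape = {N, H, W, C} */
static void FillInstanceNormSketch(InstanceNormParameter *p, const int shape[4], float epsilon) {
  p->epsilon_ = epsilon;
  p->batch_ = shape[0];
  p->channel_ = shape[3];
  p->inner_size_ = shape[1] * shape[2];  /* elements normalized per (n, c) pair */
}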
+ */
+
+#ifndef MINDSPORE_LITE_NNACL_ADD_INT8_H_
+#define MINDSPORE_LITE_NNACL_ADD_INT8_H_
+
+#ifdef ENABLE_AVX
+#include <x86intrin.h>
+#endif
+#include "nnacl/op_base.h"
+#include "nnacl/errorcode.h"
+#include "nnacl/arithmetic.h"
+#include "nnacl/int8/arithmetic_int8.h"
+
+typedef struct AddQuantQrgs {
+  int32_t zp_;
+  int32_t left_shift_;
+  int32_t right_shift_;
+  int32_t multiplier_;
+} AddQuantQrgs;
+
+typedef struct AddQuantParameter {
+  int left_shift_;
+  int32_t min_;
+  int32_t max_;
+
+  AddQuantQrgs in0_args_;
+  AddQuantQrgs in1_args_;
+
+  int32_t out_zp_;
+  int32_t out_left_shift_;
+  int32_t out_right_shift_;
+  int32_t out_multiplier_;
+} AddQuantParameter;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void AddInt8(const int8_t *input0, const int8_t *input1, int8_t *output, int size, AddQuantParameter *params);
+
+void AddOptInt8(const int8_t *ptr_in, const int8_t element_in, int8_t *output, int size, AddQuantParameter *params,
+                AddQuantQrgs *ptr_args, AddQuantQrgs *ele_args);
+
+int ElementAddInt8(const int8_t *in0, const int8_t *in1, int8_t *out, int size);
+
+int BroadcastAddInt8(const int8_t *in0, const int8_t *in1, int8_t *tile_in0, int8_t *tile_in1, int8_t *out, int size,
+                     ArithmeticParameter *param);
+
+#ifdef ENABLE_AVX
+void AddInt8_AVX2(const int8_t *input0, const int8_t *input1, int8_t *output, int size, AddQuantParameter *params);
+
+void AddOptInt8_AVX2(const int8_t *ptr_in, const int8_t element_in, int8_t *output, int size, AddQuantParameter *params,
+                     AddQuantQrgs *ptr_args, AddQuantQrgs *ele_args);
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_ADD_INT8_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arg_min_max_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arg_min_max_int8.h
new file mode 100755
index 00000000000..26854cdd346
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arg_min_max_int8.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
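A minimal call-shape sketch for AddInt8 (not part of the patch). The AddQuantParameter fields would normally be derived from the two inputs' scale/zero-point by the int8 quantizer; the numbers below are placeholders only.

#include <string.h>
#include "nnacl/int8/add_int8.h"

static void AddInt8Sketch(const int8_t *in0, const int8_t *in1, int8_t *out, int size) {
  AddQuantParameter q;
  memset(&q, 0, sizeof(q));
  q.left_shift_ = 20;          /* global up-shift, placeholder */
  q.min_ = -128;
  q.max_ = 127;
  q.in0_args_.zp_ = 3;         /* per-input rescale arguments, placeholders */
  q.in1_args_.zp_ = -1;
  q.out_zp_ = 5;
  AddInt8(in0, in1, out, size, &q);
}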
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ + +#include "nnacl/arg_min_max_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Int8ArgMinMaxQuant(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim0(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim1(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim2(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim3(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_int8.h new file mode 100755 index 00000000000..ec9d5c6fa18 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_int8.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/base/arithmetic_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void TileOneDimensionInt8(const int8_t *inData, int8_t *outData, int dim, size_t ndim, const int *inShape, + const int *inStrides, const int *outStrides, const int *multiple); +void TileDimensionsInt8(const int8_t *data0, const int8_t *data1, int8_t *tile_data0, int8_t *tile_data1, + ArithmeticParameter *param); + +int ElementNotEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +int ElementEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); + +int ElementLessInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); + +int ElementLessEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +int ElementGreaterInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +int ElementGreaterEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_self_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_self_int8.h new file mode 100755 index 00000000000..78ad1e00322 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/arithmetic_self_int8.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
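A minimal sketch (not part of the patch) of calling one of the int8 comparison kernels: they write a uint8_t mask. The ArithmeticQuantArg contents would come from the inputs' quantization parameters and are left zeroed here.

#include <string.h>
#include "nnacl/int8/arithmetic_int8.h"

static void EqualInt8Sketch(int8_t *a, int8_t *b, uint8_t *mask, int n) {
  ArithmeticQuantArg qa;
  memset(&qa, 0, sizeof(qa));            /* fill scale/zero-point of both inputs in real code */
  ElementEqualInt8(a, b, mask, n, &qa);  /* mask[i] is nonzero where the dequantized values match */
}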
+ */
+
+#ifndef MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_
+#define MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_
+
+#ifdef ENABLE_NEON
+#include <arm_neon.h>
+#endif
+#include "nnacl/op_base.h"
+#include "nnacl/errorcode.h"
+#include "nnacl/int8/quantize.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int Int8ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementCeil(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementAbs(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementSin(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementCos(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementLog(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementSqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementRsqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementSquare(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementLogicalNot(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+int Int8ElementReciprocal(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batch_to_space_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batch_to_space_int8.h
new file mode 100755
index 00000000000..a2e0a18cbc3
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batch_to_space_int8.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +void BatchToSpaceNoCropForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, + const int *block, QuantArg *in_quant_arg, QuantArg *out_quant_arg); +void BatchToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, const int *block, + const int *crops, QuantArg *in_quant_arg, QuantArg *out_quant_arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batchnorm_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batchnorm_int8.h new file mode 100755 index 00000000000..95f59f04013 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/batchnorm_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_BATCHNORM_H_ +#define MINDSPORE_LITE_NNACL_INT8_BATCHNORM_H_ + +#include "nnacl/op_base.h" +#include "nnacl/batchnorm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void BatchNormInt8(int8_t *output_ptr, const int8_t *input_ptr, const float *alpha_ptr, const float *beta_ptr, + int task_id, BatchNormParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_BATCHNORM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/common_func_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/common_func_int8.h new file mode 100755 index 00000000000..ae0e0c5ceda --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/common_func_int8.h @@ -0,0 +1,94 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
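BatchNormInt8 takes float alpha/beta buffers rather than mean/variance, which suggests the statistics are folded ahead of time so the kernel only computes y = alpha * x + beta per channel. A sketch of that folding (an assumption, not shown in the patch):

#include <math.h>

/* gamma/offset are the learned scale/shift, mean/var the moving statistics. */
static void FoldBatchNormSketch(float *alpha, float *beta, const float *gamma, const float *offset,
                                const float *mean, const float *var, float epsilon, int channel) {
  for (int c = 0; c < channel; ++c) {
    alpha[c] = gamma[c] / sqrtf(var[c] + epsilon);
    beta[c] = offset[c] - mean[c] * alpha[c];
  }
}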
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_COMMON_FUNC_H_ +#define MINDSPORE_LITE_NNACL_INT8_COMMON_FUNC_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PostFuncInt8C4(const int32_t *in, const int32_t *bias, int8_t *out, size_t oc, size_t plane, size_t stride, + int32_t multiplier, int32_t left_shift, int32_t right_shift, int32_t zp, int32_t mini, + int32_t maxi); +#ifdef ENABLE_ARM +void ConvDwInt8Row(int32_t *output_ptr, const int8_t *input_ptr, const int16_t *weight_ptr, int num_pixels, + int output_channel, int input_step, int8_t input_zp); +void ConvDwInt8PostAlign4PerChannel(int8_t *dst, int32_t *buffer, int channel4, int32_t output_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, int32_t acc_min, + int32_t acc_max); +void ConvDwInt8PostAlign4(int8_t *dst, int32_t *buffer, int num_pixels, int32_t output_zp, int32_t out_multiplier, + int32_t left_shift, int32_t right_shift, int32_t acc_min, int32_t acc_max); +void IndirectGemmInt16to32_8x4(int32_t *dst, const int16_t *src, const int16_t *weight, size_t ksize, size_t ic8, + size_t oc4, size_t offset); +void ConvDwInt8Center(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, size_t height, + size_t width, size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, + size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, int8_t *in_zp, + int32_t *out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, + int32_t *acc_min, int32_t *acc_max); +void DeconvDwInt8Center(int32_t *dst, const int16_t *src, const int16_t *weight, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, + size_t in_sw_step, size_t in_kh_step, size_t in_kw_step); +void DeconvDwInt8Post(int8_t *dst, int32_t *output_buffer, const int32_t *bias, int block_channel, int pixel_nums, + int out_multiplier, int left_shift, int right_shift, int32_t out_zp, int32_t acc_min, + int32_t acc_max); +int16x8_t LoadAndAddOffset(int8_t *data, int index, int offset); +int32x4_t ClacScaledInput(int32x4_t input, int32x4_t left_shift_result_vec, int32x4_t input_multiplier_vec, + int32x4_t right_shift_vec); +#endif + +#ifdef ENABLE_ARM32 +void ConvDw3x3Int8BorderPixel(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height, + int width, int in_kh_step, int in_kw_step, int channel, int8_t in_zp, int32_t out_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, int32_t acc_min, + int32_t acc_max, size_t per_channel); +#endif + +#ifdef ENABLE_ARM64 +void PostFuncInt8C4Neon64(const int32_t *in, const int32_t *bias, int8_t *out, size_t oc4div, size_t oc4res, + size_t plane, size_t stride, int32_t multiplier, int32_t left_shift, int32_t right_shift, + int32_t zp, int32_t mini, int32_t maxi); +void ConvDw3x3Int8Neon64(int8_t *output, const int8_t *input, const int16_t *weight, const int32_t *bias, + int input_col_size, int input_row_size, int channel, int output_h, int output_w, int8_t in_zp, + int32_t out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, + int32_t acc_min, int32_t acc_max, size_t per_channel); +void ConvDw3x3Int8Stride2(int8_t *output, const int8_t *input, const int16_t *weight, const int32_t *bias, + int input_col_size, int input_row_size, int channel, int output_h, int output_w, int8_t in_zp, + int32_t out_zp, int32_t 
*out_multiplier, int32_t *left_shift, int32_t *right_shift, + int32_t acc_min, int32_t acc_max, size_t per_channel); +void ConvDw3x3Int8Corner(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, size_t in_kh_step, + size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, int32_t *out_multiplier, + int32_t *left_shift, int32_t *right_shift, size_t acc_min, size_t acc_max, size_t per_channel); +void ConvDw3x3Int8Vertical(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, + size_t in_kh_step, size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, size_t acc_min, + size_t acc_max, size_t per_channel); +void ConvDw3x3Int8Horizontal(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, + size_t in_kh_step, size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, size_t acc_min, + size_t acc_max, size_t per_channel); +#endif +#ifdef __cplusplus +} +#endif + +#endif /* MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/concat_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/concat_int8.h new file mode 100755 index 00000000000..2c2f9d05d5d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/concat_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_CONCAT_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_CONCAT_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/concat_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Int8Concat(int8_t **inputs, int8_t *output_ptr, ConcatParameter *para, int axis, int64_t real_dst_count, + int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_CONCAT_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv1x1_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv1x1_int8.h new file mode 100755 index 00000000000..ec2ef268f51 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv1x1_int8.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_INT8_CONV1X1_INT8_H_
+#define MINDSPORE_LITE_NNACL_INT8_CONV1X1_INT8_H_
+
+#ifdef ENABLE_NEON
+#include <arm_neon.h>
+#endif
+#include "nnacl/pack.h"
+#include "nnacl/op_base.h"
+#include "nnacl/common_func.h"
+#include "nnacl/conv_parameter.h"
+#include "nnacl/int8/quantize.h"
+#include "nnacl/matmul_parameter.h"
+#include "nnacl/int8/matmul_int8.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void Conv1x1Int8(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum,
+                 const int32_t *bias, int row, int col, int deep16, int32_t *left_shift, int32_t *right_shift,
+                 int32_t *multiplier, ConvParameter *conv_param, int32_t *filter_zp);
+void Conv1x1Int8Opt(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum,
+                    const int32_t *bias, int row, int col, int deep4, int32_t *left_shift, int32_t *right_shift,
+                    int32_t *multiplier, ConvParameter *conv_param, MATMUL_OPT_DP_FUNC matmul_func, int32_t *filter_zp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_INT8_CONV1X1_INT8_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv3x3_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv3x3_int8.h
new file mode 100755
index 00000000000..5c1c9818a26
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv3x3_int8.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/common_func_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Conv3x3Int8FilterTransform(const int16_t *weight_data, int16_t *trans_weight, int iC8, int output_channel, + int kernel_plane); + +void Conv3x3Int8(int16_t *input_data, int16_t *transed_weight, const int32_t *bias_data, int8_t *output_data, + int16_t *tile_buffer, int16_t *block_unit_buffer, int32_t *tmp_dst_buffer, int8_t *tmp_out, + int task_id, ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_depthwise_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_depthwise_int8.h new file mode 100755 index 00000000000..9ff845a3e7c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_depthwise_int8.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_CONV_DEPTHWISE_H_ +#define MINDSPORE_LITE_NNACL_INT8_CONV_DEPTHWISE_H_ + +#include "nnacl/conv_parameter.h" +#include "nnacl/fp32/conv_depthwise_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void ConvDwInt8(int8_t *output_data, int32_t *output_row, const int8_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, int task_id); + +void ConvDw3x3Int8Pad(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding); + +void ConvDw3x3Int8(int8_t *output_data, int8_t *buffer, const int8_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, + int task_id); + +void ConvDwInt8SW(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, const int32_t *bias_data, + int8_t *input_zp, int32_t *output_zp, const ConvParameter *conv_param, + const SlidingWindowParam *sliding, int task_id); + +void DeconvDwInt8(int8_t *output_data, int32_t *output_buffer, const int16_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, + int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_CONV_DEPTHWISE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_int8.h new file mode 100755 index 00000000000..bc0aab94db6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/conv_int8.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/common_func_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif +// int8 conv common +void ConvInt8(int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int8_t *packed_weight, + const int32_t *bias_data, int8_t *output_data, int32_t *filter_zp, int32_t *input_sum, int task_id, + ConvParameter *conv_param, MATMUL_OPT_R_FUNC matmul_func, bool is_optimize); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/crop_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/crop_int8.h new file mode 100755 index 00000000000..77c2adef40a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/crop_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_CROP_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_CROP_INT8_H_ +#include "nnacl/op_base.h" +#include "nnacl/crop_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Int8Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop4D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_CROP_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/deconv_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/deconv_int8.h new file mode 100755 index 00000000000..339f6cb40e0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/deconv_int8.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_DECONV_H_ +#define MINDSPORE_LITE_NNACL_INT8_DECONV_H_ + +#include +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/common_func.h" +#include "nnacl/int8/matmul_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DeConvPackWeightSum(int8_t *weight, int32_t *weight_sum, int32_t input_zp, int32_t filter_zp, int deep16, int col4, + bool suppport_opt); +void DeConvPackInputSum(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16, + bool suppport_opt); +void DeConvWeightTransInt8(int8_t *src, int8_t *dst, int input_channel, int output_channel, int plane, + bool support_optimize_); + +int DeConvInt8(const int8_t *input, const int8_t *weight, int32_t *output, int32_t *weight_sum, int32_t *input_sum, + size_t act_row, size_t act_col, size_t act_deep, ConvParameter *conv_param, + MATMUL_OPT_R4_FUNC matmul_func); +int DeConvPostInt8(const int32_t *src, const int32_t *bias, int32_t *tmp, int8_t *out, int output_channel, + ConvParameter *conv_param, bool support_optimize); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_DECONV_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/depth_to_space_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/depth_to_space_int8.h new file mode 100755 index 00000000000..5af96940d58 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/depth_to_space_int8.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ + +#include "nnacl/depth_to_space_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, DepthToSpaceParameter *param, + QuantArg *in_quant_arg, QuantArg *out_quant_arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/div_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/div_int8.h new file mode 100755 index 00000000000..8522f7247cd --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/div_int8.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_DIV_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_DIV_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DivInt8(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, DivQuantArg *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_DIV_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/fixed_point.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/fixed_point.h new file mode 100755 index 00000000000..ed106b184d6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/fixed_point.h @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_LITE_NNACL_QUANTIZATION_FIXED_POINT_H_
+#define MINDSPORE_LITE_NNACL_QUANTIZATION_FIXED_POINT_H_
+
+#include <stdint.h>
+#include <limits.h>
+#ifdef ENABLE_NEON
+#include <arm_neon.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// returns the high-32 bits of a * b with rounding
+// assume that a and b are divided by 2^31, so they fall into [-1, 1]
+// so the mantissa of a * b is (a / 2^31) * (b / 2^31) * 2^31 = (a * b) / 2^31
+// actually we compute 2 * a * b / 2^32
+// and take 32 bits of mantissa for rounding
+int SaturatingRoundingDoublingHighMul(int a, int b);
+
+int16_t SaturatingRoundingDoublingHighMulInt16(int16_t a, int16_t b);
+
+// division by a 2^exponent with rounding
+// or arithmetic right shift with rounding
+int RoundingDivideByPOT(int x, int exponent);
+
+int UpwardRounding(int x, int exponent);
+
+int MultiplyByQuantizedMultiplier(int32_t value, int32_t multiplier, int32_t left_shift, int32_t right_shift);
+
+int MultiplyByQuantizedMultiplierWithUpwardRounding(int32_t value, int32_t multiplier, int32_t left_shift,
+                                                    int32_t right_shift);
+
+int MultiplyByMultiplierAndRightShift(int32_t value, int32_t multiplier, int32_t right_shift);
+
+int SaturatingRoundingMultiplyByPOT(int32_t x, int exponent);
+
+int32_t Rescale(int x, int kIntegerBitsSrc, int kIntegerBitsDst);
+
+int CountLeadingSignBits(int32_t x);
+
+int32_t ComputerReciprocal(int32_t x, int x_digits, int *recip_shift);
+
+int exp_on_negative_values(int a, const int tIntegerBits);
+
+void GetSqrtQuantMultiplierExp(int32_t input, int reverse_shift, int32_t *multiplier, int32_t *shift);
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef ENABLE_NEON
+int32x4_t RoundingDivideByPOTInt32x4(int32x4_t x, int exponent);
+
+int32x4_t SaturatingRoundingDoublingHighMulInt32x4(int32x4_t a, int32x4_t b);
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_QUANTIZATION_FIXED_POINT_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gatherNd_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gatherNd_int8.h
new file mode 100755
index 00000000000..16f9749a46f
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gatherNd_int8.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
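A typical requantization step combines the two fixed-point helpers exactly as their comments describe: the Q31 doubling-high multiply followed by the rounding power-of-two divide. A minimal sketch (the multiplier and shift values below are placeholders a quantizer would normally compute):

#include "nnacl/int8/fixed_point.h"

static int8_t RequantizeSketch(int32_t acc) {
  const int32_t multiplier = 1395864371;  /* roughly 0.65 expressed in Q31, placeholder */
  const int right_shift = 8;              /* placeholder */
  int v = SaturatingRoundingDoublingHighMul(acc, multiplier);  /* (acc * multiplier) / 2^31 */
  v = RoundingDivideByPOT(v, right_shift);                     /* rounded divide by 2^8 */
  if (v > 127) v = 127;                                        /* clamp to int8 range */
  if (v < -128) v = -128;
  return (int8_t)v;
}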
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_GATHERND_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_GATHERND_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int GatherNdInt8(int8_t *in_data, int8_t *out_data, const int *in_offset, int area, int count, GatherQuantArg param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_GATHERND_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gather_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gather_int8.h new file mode 100755 index 00000000000..6666188cb78 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/gather_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_GATHER_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_GATHER_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int GatherInt8(int8_t *in_data, int8_t *out_data, int outer_size, int inner_size, int limit, const int *indices, + int indices_element_size, GatherQuantArg para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_INT8_GATHER_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/hswish_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/hswish_int8.h new file mode 100755 index 00000000000..fd18fe43716 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/hswish_int8.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
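/*
 * Illustrative sketch only: one plausible reading of the GatherInt8 interface declared
 * above, assuming `in_data` is laid out as [outer_size, limit, inner_size], indices select
 * along the `limit` axis, and the quant argument rescales values by a ratio `alpha` between
 * input and output zero points (the alpha_/zp_in_/zp_out_ fields of GatherQuantArg).
 * The real kernel's layout and rounding mode may differ.
 */
#include <math.h>
#include <stddef.h>
#include <stdint.h>

static int GatherInt8Sketch(const int8_t *in, int8_t *out, int outer_size, int inner_size, int limit,
                            const int *indices, int indices_num, double alpha, int zp_in, int zp_out) {
  for (int o = 0; o < outer_size; ++o) {
    for (int i = 0; i < indices_num; ++i) {
      int idx = indices[i];
      if (idx < 0 || idx >= limit) return -1; /* out-of-range index */
      const int8_t *src = in + ((size_t)o * limit + idx) * inner_size;
      int8_t *dst = out + ((size_t)o * indices_num + i) * inner_size;
      for (int j = 0; j < inner_size; ++j) {
        int32_t v = (int32_t)round(alpha * (src[j] - zp_in)) + zp_out; /* requantize */
        dst[j] = (int8_t)(v > 127 ? 127 : (v < -128 ? -128 : v));
      }
    }
  }
  return 0;
}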
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_HSWISH_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_HSWISH_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/fixed_point.h" + +typedef struct HswishQuantArg { + double input_scale; + int32_t input_zp; + double output_scale; + int32_t output_zp; + int16_t relu6_multiplier_fixedpoint_int16; + int32_t relu6_multiplier_exponent; + int16_t output_multiplier_fixedpoint_int16; + int32_t output_multiplier_exponent; +} HswishQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif +int HSwishInt8(const int8_t *src, int length, int8_t *dst, HswishQuantArg *arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_HSWISH_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/l2_norm_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/l2_norm_int8.h new file mode 100755 index 00000000000..b26fa8bf76a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/l2_norm_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_L2_NORM_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_L2_NORM_INT8_H_ + +#include "nnacl/l2_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int L2NormalizationInt8(const int8_t *input_data, int8_t *output_data, const L2NormParameter *param, + const L2NormQuantArg *quant_param, const int begin, const int end); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_L2_NORM_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/layer_norm_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/layer_norm_int8.h new file mode 100755 index 00000000000..3e23478ffae --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/layer_norm_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
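/*
 * Semantic reference (float math) for what HSwishInt8 above is expected to compute:
 * hswish(x) = x * relu6(x + 3) / 6 on dequantized values, then requantize. It assumes the
 * usual affine mapping q = round(x / scale) + zp; the real kernel works with the fixed-point
 * multiplier/exponent fields of HswishQuantArg instead of the float path shown here.
 */
#include <math.h>
#include <stdint.h>
#include "nnacl/int8/hswish_int8.h"

static inline int8_t HSwishInt8Reference(int8_t q, const HswishQuantArg *arg) {
  float x = (float)(q - arg->input_zp) * (float)arg->input_scale; /* dequantize */
  float relu6 = fminf(fmaxf(x + 3.0f, 0.0f), 6.0f);
  float y = x * relu6 / 6.0f;
  int32_t out = (int32_t)roundf(y / (float)arg->output_scale) + arg->output_zp; /* requantize */
  if (out > 127) out = 127;
  if (out < -128) out = -128;
  return (int8_t)out;
}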
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_LAYER_NORM_H_ +#define MINDSPORE_LITE_NNACL_INT8_LAYER_NORM_H_ + +#include "nnacl/errorcode.h" +#include "nnacl/layer_norm_parameter.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNormInt8(const int8_t *src_data, const float *gamma_data, const float *beta_data, int8_t *dst_data, + LayerNormParameter *param, LayerNormQuantArg *quant, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_LAYER_NORM_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/leaky_relu_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/leaky_relu_int8.h new file mode 100755 index 00000000000..ee8808f3a25 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/leaky_relu_int8.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_PRELU_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_PRELU_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_PRELU_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/matmul_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/matmul_int8.h new file mode 100755 index 00000000000..c10c1b6149d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/matmul_int8.h @@ -0,0 +1,83 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_MATMUL_H_ +#define MINDSPORE_LITE_NNACL_INT8_MATMUL_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/matmul_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +/* 4x16 16x4 -> 4x4 */ +/* matmul */ +void MatMulInt8_16x4(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16, + const int *input_sum, const int *bias); +void RowMajor2Row16x4MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void RowMajor2Col16x4MajorInt8(int8_t *src, int row, int col, int8_t *dst); +void CalcInputSums(int8_t *input, int row, int col, int weight_zp, int *dst, DataOrder order); +void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int *weight_zp_ptr, const int *bias, int *dst, + DataOrder order, bool filter_per_channel); +void MatmulInt8Opt(const int8_t *a, const int8_t *b, int8_t *dst, int row, int col, int deep16, const int *a_sums, + const int *bias, int act_min, int act_max, int out_zp, int32_t *multiplier, int32_t *left_shift, + int32_t *right_shift, size_t stride, size_t filter_peroc, int32_t *filter_zp); + +/* 8x4 4x8 -> 8x8 */ +/* optimize conv */ +void RowMajor2Row8x4MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void MatMulInt8_8x8_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, + size_t per_channel); + +/* 4x16 16x2 -> 4x2 */ +/* arm32 conv1x1 */ +void RowMajor2Row2x16MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void MatMulInt8_4x2_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_16, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, + bool peroc); + +/* 4x4 4x16 -> 4x16 */ +/* optimize conv1x1 */ +void RowMajor2Row4x16MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void PackInput4x4AndInputSumPert(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, + size_t input_channel, size_t plane_size, int32_t filter_zp); +void MatMulInt8_4x16_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, + size_t per_channel, int32_t *filter_zp); + +#ifdef ENABLE_ARM64 +void MatmulInt8Neon64(const int8_t *a, const int8_t *b, int8_t *dst, int row4, int col4, int deep16, const int *a_sums, + const int *bias, int act_min, int act_max, int out_zp, int32_t *multiplier, int32_t *left_shift, + int32_t *right_shift, int row, int col, int stride, int filter_peroc); + +void MatMulR4Int8Neon64(const int8_t *a, const int8_t *b, int32_t *dst, int row4, int col4, int deep16, + const int *input_sum, const int *bias); +#endif +#ifdef ENABLE_ARM32 +void MatmulInt8Neon32(const int8_t *a, const int8_t *b, int8_t *dst, int row, int col, int deep16, + const int *input_sums, const int *weight_bias, int act_min, int act_max, int out_zp, + int *multiplier, int *left_shift, int *right_shift, int stride, int per_channel); +#endif +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_INT8_MATMUL_H_ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/mul_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/mul_int8.h new file mode 100755 index 00000000000..61ccfd89181 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/mul_int8.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_MUL_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_MUL_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/mul_parameter.h" +#include "nnacl/int8/common_func_int8.h" +#include "nnacl/int8/fixed_point.h" +#ifdef ENABLE_NEON +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif +void Mul(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, MulQuantArg para); +void FastMul(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int depth, int64_t real_dst_count, + bool input1_broad, MulQuantArg para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_MUL_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pack_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pack_int8.h new file mode 100755 index 00000000000..ea51ac89adc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pack_int8.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
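/*
 * Semantic reference for the int8 elementwise Mul declared above, written with explicit
 * scale/zero-point parameters as hypothetical stand-ins for the MulQuantArg fields (that
 * struct lives in mul_parameter.h and is not shown here). A production kernel would fold
 * s0 * s1 / so into a fixed-point multiplier via the fixed_point.h helpers rather than
 * using float math per element.
 */
#include <math.h>
#include <stdint.h>

static void MulInt8Reference(const int8_t *in0, const int8_t *in1, int8_t *out, int64_t count,
                             float s0, int zp0, float s1, int zp1, float so, int zpo) {
  for (int64_t i = 0; i < count; ++i) {
    float real = ((in0[i] - zp0) * s0) * ((in1[i] - zp1) * s1); /* dequantized product */
    int32_t q = (int32_t)roundf(real / so) + zpo;               /* requantize */
    out[i] = (int8_t)(q > 127 ? 127 : (q < -128 ? -128 : q));
  }
}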
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_PACK_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_PACK_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void PackNHWCToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWC4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNCHWToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNCHWInt8(const void *src, void *dst, int batch, int plane, int channel); + +void PackInputSum16x4Int8(const int8_t *input, int32_t *input_sum, int32_t *filter_zp, ConvParameter *conv_param); +void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16); +void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, ConvParameter *conv_param); +void PackWeightToC8Int8(const int8_t *origin_weight_data, int16_t *packed_weight_data, ConvParameter *conv_param); +void Im2ColPackUnitInt8Opt(const int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int real_cal_num, + int block_index, int32_t *filter_zp, int32_t *input_sum, ConvParameter *conv_param, + bool per_channel, bool is_optimize); +#ifdef ENABLE_ARM +void PreSum4x16Int8Pert(const int8_t *src, int32_t *sum, size_t row4, size_t col16, int32_t filter_zp); +void PreSum4x16Int8Peroc(const int8_t *src, int32_t *sum, int32_t *zp, size_t hw4, size_t ic16, int32_t oc_div, + size_t oc_res, size_t stride); +#endif + +void PackDepthwiseInt8Input(const int8_t *src, int16_t *dst, const ConvParameter *conv_param); +void PackDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel, + ConvQuantArg *quant_qrg); +void PackDeconvDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel, + ConvQuantArg *quant_qrg); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pad_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pad_int8.h new file mode 100755 index 00000000000..41da1655586 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pad_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
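/*
 * Illustrative sketch of the NHWC -> NHWC4 repack declared in pack_int8.h above, assuming
 * the usual convention that NHWC4 rounds the channel dimension up to a multiple of 4 and
 * zero-fills the padding; the library may choose a different pad value or tiling.
 */
#include <stdint.h>
#include <string.h>

static void PackNHWCToNHWC4Int8Sketch(const int8_t *src, int8_t *dst, int batch, int plane, int channel) {
  int c4 = (channel + 3) / 4 * 4; /* channel rounded up to a multiple of 4 */
  for (int b = 0; b < batch; ++b) {
    for (int p = 0; p < plane; ++p) {
      const int8_t *s = src + ((size_t)b * plane + p) * channel;
      int8_t *d = dst + ((size_t)b * plane + p) * c4;
      memcpy(d, s, (size_t)channel);                  /* copy the real channels */
      memset(d + channel, 0, (size_t)(c4 - channel)); /* zero the padded tail */
    }
  }
}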
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/pad_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims, + const int32_t *paddings, const int tid, const int thread_num); +void MirrorPadInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, const PadParameter *pad_param, + int begin, int end); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pooling_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pooling_int8.h new file mode 100755 index 00000000000..623aa05dbd8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/pooling_int8.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_POOLING_H_ +#define MINDSPORE_LITE_NNACL_INT8_POOLING_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/fp32/pooling_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +#define MAX_MAXPOOL_SIZE 256 + +int AvgPoolingInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +int AvgPoolingOptInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +void MaxPoolingInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +void MaxPoolingWithQuantInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +void MaxPoolingOptInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_POOLING_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/power_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/power_int8.h new file mode 100755 index 00000000000..86fe6509801 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/power_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_POWER_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_POWER_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/power_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int PowerInt8(const int8_t *input_ptr, int8_t *exp_ptr, int8_t *output_ptr, int count, PowerParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_POWER_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quant_dtype_cast_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quant_dtype_cast_int8.h new file mode 100755 index 00000000000..cc61782c6b6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quant_dtype_cast_int8.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ +#define MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ + +#include "nnacl/op_base.h" + +typedef struct QuantDTypeCastParameter { + OpParameter op_parameter_; + int32_t srcT; + int32_t dstT; +} QuantDTypeCastParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int DoDequantizeInt8ToFp32(const int8_t *quant_values, float *real_values, float scale, int32_t zp, int size); +int DoQuantizeFp32ToInt8(const float *real_values, int8_t *quant_values, float scale, int32_t zp, int size, + bool uint8_flag); +int DoDequantizeUInt8ToFp32(const uint8_t *quant_values, float *real_values, float scale, int32_t zp, int size); +int DoQuantizeFp32ToUInt8(const float *real_values, uint8_t *quant_values, float scale, int32_t zp, int size); +int Int8ToUInt8(const int8_t *quant_values, uint8_t *real_values, int size); +int UInt8ToInt8(const uint8_t *real_values, int8_t *quant_values, int size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quantize.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quantize.h new file mode 100755 index 00000000000..06a4b5fa550 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/quantize.h @@ -0,0 +1,219 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
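/*
 * Plausible reference semantics for the int8 <-> fp32 casts declared in
 * quant_dtype_cast_int8.h above, assuming the usual affine mapping real = scale * (q - zp).
 * Error codes and the uint8_flag path of the real kernels are not reproduced here.
 */
#include <math.h>
#include <stdint.h>

static void DequantizeInt8ToFp32Sketch(const int8_t *q, float *r, float scale, int32_t zp, int size) {
  for (int i = 0; i < size; ++i) r[i] = scale * (float)(q[i] - zp);
}

static void QuantizeFp32ToInt8Sketch(const float *r, int8_t *q, float scale, int32_t zp, int size) {
  for (int i = 0; i < size; ++i) {
    int32_t v = (int32_t)roundf(r[i] / scale) + zp;
    q[i] = (int8_t)(v > 127 ? 127 : (v < -128 ? -128 : v)); /* clamp to int8 range */
  }
}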
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_QUANTIZATION_QUANTIZE_H_ +#define MINDSPORE_LITE_NNACL_QUANTIZATION_QUANTIZE_H_ + +#include +#include +#include "nnacl/op_base.h" + +#define INPUT_PER_CHANNEL 0b001 +#define FILTER_PER_CHANNEL 0b010 +#define OUTPUT_PER_CHANNEL 0b100 + +typedef struct ConvQuantArg { + RoundingMode round_mode_; + CalFixedMultiplierMode quant_multiplier_mode_; + QuantArg *input_quant_args_; + QuantArg *filter_quant_args_; + QuantArg *output_quant_args_; + double *real_multiplier_; + int32_t *left_shift_; + int32_t *right_shift_; + int32_t *quant_multiplier_; + int32_t *out_act_min_; + int32_t *out_act_max_; + size_t input_arg_num_; + size_t filter_arg_num_; + size_t output_arg_num_; + uint8_t per_channel_; +} ConvQuantArg; + +typedef struct ConcatQuantArg { + QuantArg *in_args_; + QuantArg out_args_; + int8_t output_activation_min_; + int8_t output_activation_max_; +} ConcatQuantArg; + +typedef struct PreluQuantArg { + int *input_sizes_; + int output_size_; + int **input_shapes_; + int *output_shape_; + size_t input_num_; + size_t output_dim_; + float alpha_; + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + QuantArg *in_quant_args_; + QuantArg out_quant_args_; +} PreluQuantArg; + +typedef struct CropQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} CropQuantArg; + +typedef struct ArithSelfQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + int output_multiplier_; + int shift_left_; + int shift_right_; +} ArithSelfQuantArg; + +typedef struct GatherQuantArg { + double alpha_; + int zp_in_; + int zp_out_; +} GatherQuantArg; + +typedef struct SoftmaxQuantArg { + QuantArg in_quant_args_; + QuantArg out_quant_arg_; + int output_activation_min_; + int output_activation_max_; + int output_multiplier_; + int shift_left_; + int shift_right_; +} SoftmaxQuantArg; + +typedef struct SubQuantArg { + QuantArg in0_args_; + QuantArg in1_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + int input0_multiplier_; + int input1_multiplier_; + int output_multiplier_; + int input0_shift_; + int input1_shift_; + int output_shift_; + int left_shift_result0_; + int left_shift_result1_; + int right_shift0_; + int right_shift1_; + int left_shift_out_; + int right_shift_out_; +} SubQuantArg; + +typedef struct ArithmeticQuantArg { + QuantArg in0_args_; + QuantArg in1_args_; + QuantArg out_args_; +} ArithmeticQuantArg; + +typedef struct DivQuantArg { + QuantArg in0_args_; + QuantArg in1_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + int output_multiplier_; + int output_shift_; +} DivQuantArg; + +typedef struct ReduceQuantArg { + double in_scale_; + int32_t in_zp_; + double out_scale_; + int32_t out_zp_; + int32_t in_out_multiplier_; + int in_out_left_shift_; + int in_out_right_shift_; + int32_t mean_multiplier_; + int mean_left_shift_; + int mean_right_shift_; + int32_t prod_multiplier_; + int prod_left_shift_; + int prod_right_shift_; + int32_t sum_square_multiplier_; + int sum_square_left_shift_; + int sum_square_right_shift_; +} ReduceQuantArg; + +typedef struct LeakyReluQuantArg { + OpParameter op_parameter_; + PreluQuantArg quant_arg; + float slope_; + int64_t axis_; + int *in_shape_; + int *out_shape_; + int input_dim_; + int 
element_num; +} LeakyReluQuantArg; + +typedef struct ResizeQuantArg { + int32_t ratio_x_; + int32_t ratio_y_; + int32_t *x_axis_index_; + int32_t *x_axis_lower_; + int32_t *x_axis_upper_; + int32_t *y_axis_index_; + int32_t *y_axis_lower_; + int32_t *y_axis_upper_; +} ResizeQuantArg; + +typedef struct ResizeFloatScaleQuantArg { + float ratio_x_; + float ratio_y_; + float *x_axis_index_; + int32_t *x_axis_lower_; + int32_t *x_axis_upper_; + float *y_axis_index_; + int32_t *y_axis_lower_; + int32_t *y_axis_upper_; +} ResizeFloatScaleQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif + +void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); + +void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier, int *right_shift); + +void QuantizeRoundParameterWithDoublePrecision(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, + int *right_shift); + +void QuantizeRoundParameterWithSinglePrecision(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, + int *right_shift); + +uint8_t QuantizeToUint8(float real_value, float scale, int32_t zp); + +int32_t QuantizeToInt8(float real_value, float scale, int32_t zp); + +void CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, float scale, int *mini, int *maxi); +// quantize from float to int8 +void Quantize(const float *input_data, int length, float scale, int zero_point, int8_t *output_data); + +// dequantize from int8 to float +void Dequantize(int8_t *input_data, int length, float scale, int zero_point, float *output_data); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_QUANTIZATION_QUANTIZE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reduce_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reduce_int8.h new file mode 100755 index 00000000000..44d845f5ff9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reduce_int8.h @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
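/*
 * Sketch of the standard frexp-based decomposition behind QuantizeMultiplier declared in
 * quantize.h above: express a double multiplier as q * 2^shift with q a Q31 integer in
 * [2^30, 2^31). Sign conventions for `shift` in the actual library function may differ.
 * A requantization y ~= x * multiplier is then typically applied as
 * RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(x, qm), right_shift) for the
 * appropriate split of `shift` into left/right parts.
 */
#include <math.h>
#include <stdint.h>

static void QuantizeMultiplierSketch(double multiplier, int32_t *quantized_multiplier, int *shift) {
  if (multiplier == 0.0) {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  double q = frexp(multiplier, shift); /* multiplier = q * 2^shift, q in [0.5, 1) */
  int64_t q_fixed = (int64_t)llround(q * (double)((int64_t)1 << 31));
  if (q_fixed == ((int64_t)1 << 31)) { /* rounding can push q to exactly 1.0 */
    q_fixed /= 2;
    ++*shift;
  }
  *quantized_multiplier = (int32_t)q_fixed;
}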
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_REDUCE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_REDUCE_INT8_H_ + +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ReduceMeanN(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanH(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNH(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanHW(int n, int plane, int count, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg, + int32_t bias); +int ReduceMeanHC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNHW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNHC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanHWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNHWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); + +int ReduceMeanInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMeanLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMaxInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMaxLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMinInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMinLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceProdLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int 
tid, const int thread_num); +int ReduceProdInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumSquareLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumSquareInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INT8_REDUCE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/relux_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/relux_int8.h new file mode 100755 index 00000000000..78b78596e82 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/relux_int8.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_RELU_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_RELU_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quantize.h" + +typedef struct ReluXQuantArg { + QuantArg input_arg; + QuantArg output_arg; + int input_multiplier_; + int left_shift_; + int right_shift_; + int quantized_output_min; + int quantized_output_max; +} ReluXQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif +void ReluXInt8(const int8_t *src, int length, int8_t *dst, ReluXQuantArg *arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_RELU_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reshape_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reshape_int8.h new file mode 100755 index 00000000000..5e88d859881 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/reshape_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_RESHAHPE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_RESHAHPE_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/reshape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Int8Reshape(int8_t *input_ptr, int8_t *output_ptr, int64_t real_dst_count, ReshapeQuantArg para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_RESHAHPE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/resize_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/resize_int8.h new file mode 100755 index 00000000000..49a328262a3 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/resize_int8.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_RESIZE_H_ +#define MINDSPORE_LITE_NNACL_INT8_RESIZE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/resize_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ResizeBilinearInt8(const int8_t *input_ptr, int8_t *output_ptr, int batch, int in_h, int in_w, int out_h, int out_w, + int channel, int index, int count, ResizeQuantArg quant_arg); + +int ResizeBilinearWithFloatScaleInt8(const int8_t *input_ptr, int8_t *output_ptr, int batch, int in_h, int in_w, + int out_h, int out_w, int channel, int index, int count, + ResizeFloatScaleQuantArg quant_arg); + +int ResizeNearestNeighborInt8Simple(const int8_t *input_data, int8_t *output_data, const int *input_shape, + const int *output_shape, const bool align_corners, int tid, int thread_num); + +int ResizeNearestNeighborInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, + const int *output_shape, const bool align_corners, const QuantMulArg *multiplier, + QuantArg *quant_in, QuantArg *quant_out, int tid, int thread_num); + +void ComputeNearestNeighborInt(const int32_t pos, const int in_size, const int32_t new_size, const bool align_corners, + int32_t *nearest); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_RESIZE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/scale_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/scale_int8.h new file mode 100755 index 00000000000..993e5b808ca --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/scale_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_SCALE_INT8_H_ +#define MINDSPORE_LITE_NNACL_SCALE_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/scale.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DoScaleInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const ScaleParameter *scale_param, + int real_dst_count); +void DoScaleWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const int8_t *offset, + const ScaleParameter *scale_param, int real_dst_count); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_SCALE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sigmoid_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sigmoid_int8.h new file mode 100755 index 00000000000..c2ecfdae59d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sigmoid_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_Sigmoid_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_Sigmoid_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SigmoidInt8(const int8_t *src, int length, int8_t *dst, int8_t *table); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_Sigmoid_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/slice_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/slice_int8.h new file mode 100755 index 00000000000..70ac1fbd8c9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/slice_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
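/*
 * The int8_t *table argument of SigmoidInt8 above suggests a lookup-table kernel: every
 * possible int8 input maps to a precomputed int8 output. A plausible way to build and use
 * such a 256-entry table is sketched below; the scale/zero-point parameters are hypothetical
 * inputs for illustration, not fields defined in this header, and the real kernel may build
 * its table differently.
 */
#include <math.h>
#include <stdint.h>

static void BuildSigmoidTableSketch(int8_t table[256], float in_scale, int in_zp, float out_scale, int out_zp) {
  for (int q = -128; q <= 127; ++q) {
    float x = in_scale * (float)(q - in_zp);
    float y = 1.0f / (1.0f + expf(-x));
    int32_t v = (int32_t)roundf(y / out_scale) + out_zp;
    table[(uint8_t)(int8_t)q] = (int8_t)(v > 127 ? 127 : (v < -128 ? -128 : v));
  }
}

static void SigmoidInt8Sketch(const int8_t *src, int length, int8_t *dst, const int8_t table[256]) {
  for (int i = 0; i < length; ++i) dst[i] = table[(uint8_t)src[i]]; /* one table lookup per element */
}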
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ + +#include +#include +#include "nnacl/op_base.h" +#include "nnacl/slice_parameter.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param); +int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param, int thread_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/softmax_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/softmax_int8.h new file mode 100755 index 00000000000..83c8aa7ca06 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/softmax_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_SOFTMAX_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SOFTMAX_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/softmax_parameter.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SoftmaxInt8(const int8_t *input_ptr, int8_t *output_ptr, int count, int *exp_data, int *sum_data, + SoftmaxQuantArg quant_param, SoftmaxParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_SOFTMAX_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/space_to_batch_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/space_to_batch_int8.h new file mode 100755 index 00000000000..a8e6df17a85 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/space_to_batch_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/fp32/space_to_batch_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, const int *in_shape, + const int *out_shape); +void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, SpaceToBatchParameter *param, int32_t zp); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/splice_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/splice_int8.h new file mode 100755 index 00000000000..326aea4a0df --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/splice_int8.h @@ -0,0 +1,30 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ +#include +#include "nnacl/splice_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif + +void SpliceInt8(const int8_t *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, + int8_t *dst_data, int dst_row, int dst_col); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/split_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/split_int8.h new file mode 100755 index 00000000000..065674c56a1 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/split_int8.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_SPLIT_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SPLIT_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/split_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int Int8DoSplit(int8_t *in_data, int8_t **out_data, const int *input_shape, int offset, int num_unit, + SplitParameter *split_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_SPLIT_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/squeeze_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/squeeze_int8.h new file mode 100755 index 00000000000..8e1a3505aeb --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/squeeze_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_SQUEEZE_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_SQUEEZE_INT8_H_ + +#include "nnacl/squeeze_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +void SqueezeInt8(const int8_t *input_ptr, int8_t *output_ptr, const SqueezeQuantArg *quant_Squeeze_parm, int num, + int task_id, int thread_count); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_SQUEEZE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sub_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sub_int8.h new file mode 100755 index 00000000000..6764072e70b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/sub_int8.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_SUB_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SUB_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SubInt8(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, SubQuantArg *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_SUB_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/tanh_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/tanh_int8.h new file mode 100755 index 00000000000..1ad2cbecf41 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/tanh_int8.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_TANH_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_TANH_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quant_dtype_cast_int8.h" +#include "nnacl/fp32/activation_fp32.h" + +typedef struct TanhQuantParameter { + int32_t in_zp_; + int32_t out_zp_; + double in_scale_; + double out_scale_; +} TanhQuantParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +void TanhInt8(const int8_t *input_ptr, int8_t *output_ptr, int size, TanhQuantParameter *quant); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_TANH_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/topk_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/topk_int8.h new file mode 100755 index 00000000000..6dfb76725c4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/topk_int8.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
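/*
 * Semantic reference (float math) for TanhInt8 above: dequantize with the input scale and
 * zero point from TanhQuantParameter, apply tanh, then requantize and clamp. The actual
 * kernel may use a fixed-point or table-based approximation instead of the float path.
 */
#include <math.h>
#include <stdint.h>
#include "nnacl/int8/tanh_int8.h"

static inline int8_t TanhInt8Reference(int8_t q, const TanhQuantParameter *p) {
  float x = (float)(q - p->in_zp_) * (float)p->in_scale_;
  int32_t out = (int32_t)roundf(tanhf(x) / (float)p->out_scale_) + p->out_zp_;
  if (out > 127) out = 127;
  if (out < -128) out = -128;
  return (int8_t)out;
}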
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_TOPK_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_TOPK_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/fp32/topk_fp32.h" + +typedef struct TopkNodeInt8 { + int8_t element; + int32_t index; +} TopkNodeInt8; + +#ifdef __cplusplus +extern "C" { +#endif +void TopkInt8(int8_t *input_data, int8_t *output_data, int32_t *output_index, TopkParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_TOPK_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/transpose_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/transpose_int8.h new file mode 100755 index 00000000000..c90c485bc14 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/transpose_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_INT8_TRANSPOSE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_TRANSPOSE_INT8_H_ + +#include +#include "nnacl/transpose.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, const int *output_shape, + TransposeParameter *transpose_param, int h_start, int h_end, int *dim_size, int *position); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INT8_TRANSPOSE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/unsqueeze_int8.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/unsqueeze_int8.h new file mode 100755 index 00000000000..b6fbce9cd29 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/int8/unsqueeze_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
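A sketch of the per-row selection TopkNodeInt8 is built for: pair each value with its index, sort descending, keep the first k entries. The k field is assumed to live in TopkParameter (defined in nnacl/fp32/topk_fp32.h, not shown here), and the helper names below are illustrative.

#include <stdint.h>
#include <stdlib.h>

/* Assumes nnacl/int8/topk_int8.h has been included for TopkNodeInt8. */
static int CompareTopkNodeInt8Desc(const void *a, const void *b) {
  const TopkNodeInt8 *na = (const TopkNodeInt8 *)a;
  const TopkNodeInt8 *nb = (const TopkNodeInt8 *)b;
  return (int)nb->element - (int)na->element; /* descending by value */
}

static void TopkRowSketch(const int8_t *row, int len, int k, int8_t *out_value,
                          int32_t *out_index, TopkNodeInt8 *workspace) {
  for (int i = 0; i < len; ++i) {
    workspace[i].element = row[i];
    workspace[i].index = i;
  }
  qsort(workspace, (size_t)len, sizeof(TopkNodeInt8), CompareTopkNodeInt8Desc);
  for (int i = 0; i < k && i < len; ++i) {
    out_value[i] = workspace[i].element;
    out_index[i] = workspace[i].index;
  }
}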
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_UNSQUEEZE_INT8_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_UNSQUEEZE_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/unsqueeze_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int Int8Unsqueeze(int8_t *input_ptr, int8_t *output_ptr, UnSqueezeParameter *para_, size_t data_size, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_UNSQUEEZE_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/avx/common_utils.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/avx/common_utils.h new file mode 100755 index 00000000000..14589405dc5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/avx/common_utils.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_X86_64_AVX_COMMON_UTILS_H_ +#define MINDSPORE_LITE_NNACL_X86_64_AVX_COMMON_UTILS_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif +#ifdef __GNUC__ +#if __GNUC__ < 8 +#define _mm256_set_m128i(xmm1, xmm2) \ + _mm256_permute2f128_si256(_mm256_castsi128_si256(xmm1), _mm256_castsi128_si256(xmm2), 2) +#define _mm256_set_m128f(xmm1, xmm2) \ + _mm256_permute2f128_ps(_mm256_castps128_ps256(xmm1), _mm256_castps128_ps256(xmm2), 2) +#endif +#endif + +// Signed saturating Add +__m128i _mm_adds_epi32(__m128i a, __m128i b); + +// Signed rounding shift right +__m128i _mm_rshr_epi32(__m128i a, int shift); + +// Signed saturating Rounding Doubling Multiply return High half +__m128i _mm_qrdmulh_epi32(__m128i a, __m128i b); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_X86_64_AVX_COMMON_UTILS_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/ms_simd_instructions.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/ms_simd_instructions.h new file mode 100755 index 00000000000..6c1153d6d3b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/ms_simd_instructions.h @@ -0,0 +1,226 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
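The three helpers declared in avx/common_utils.h above mirror ARM's saturating and rounding fixed-point primitives (VQADD, VRSHR, VQRDMULH). A scalar sketch of the per-lane semantics they are expected to follow, stated as an assumption from the names and the usual gemmlowp-style arithmetic, since the patch only carries the declarations:

#include <stdint.h>

/* Per-lane scalar sketches; rounding/tie behaviour may differ in detail
 * from the SIMD kernels. */
static int32_t SatAddS32(int32_t a, int32_t b) {             /* _mm_adds_epi32 */
  int64_t s = (int64_t)a + (int64_t)b;
  if (s > INT32_MAX) return INT32_MAX;
  if (s < INT32_MIN) return INT32_MIN;
  return (int32_t)s;
}

static int32_t RoundShiftRightS32(int32_t a, int shift) {    /* _mm_rshr_epi32 */
  /* rounding-to-nearest arithmetic shift right, valid for 1 <= shift <= 31 */
  return (int32_t)(((int64_t)a + ((int64_t)1 << (shift - 1))) >> shift);
}

static int32_t RoundDoublingHighMulS32(int32_t a, int32_t b) { /* _mm_qrdmulh_epi32 */
  if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;    /* only overflow case */
  int64_t ab = (int64_t)a * (int64_t)b;
  int64_t nudge = ab >= 0 ? (1LL << 30) : (1 - (1LL << 30));
  return (int32_t)((ab + nudge) / (1LL << 31));
}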
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ +#define MINDSPORE_LITE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ +#include +#ifdef ENABLE_ARM +#include +#endif +#if defined(ENABLE_SSE) || defined(ENABLE_AVX) +#include +#endif + +#ifdef ENABLE_ARM +#define MS_FLOAT32X4 float32x4_t +#define MS_INT32X4 int32x4_t +#define MS_LDQ_F32 vld1q_f32 +#define MS_LDQ_EPI32 vld1q_s32 +#define MS_ADDQ_F32 vaddq_f32 +#define MS_ADDQ_EPI32 vaddq_s32 +#define MS_MOVQ_F32 vmovq_n_f32 +#define MS_MOVQ_EPI32 vmovq_n_s32 +#define MS_SUBQ_F32 vsubq_f32 +#define MS_MLAQ_F32(src1, src2, src3) vmlaq_f32(src1, src2, src3) +#define MS_STQ_F32 vst1q_f32 +#define MS_STQ_EPI32 vst1q_s32 +#define MS_MAXQ_F32 vmaxq_f32 +#define MS_MAXQ_EPI32 vmaxq_s32 +#define MS_MINQ_F32 vminq_f32 +#define MS_MINQ_EPI32 vminq_s32 +#define MS_MULQ_F32(src1, src2) vmulq_f32(src1, src2) +#define MS_MULQ_EPI32(src1, src2) vmulq_s32(src1, src2) +#ifdef ENABLE_ARM64 +#define MS_DIVQ_F32(src1, src2) vdivq_f32(src1, src2) +#else +inline static float32x4_t vrecp(float32x4_t v) { + float32x4_t r = vrecpeq_f32(v); + r = vmulq_f32(vrecpsq_f32(v, r), r); + r = vmulq_f32(vrecpsq_f32(v, r), r); + return r; +} +#define MS_DIVQ_F32(src1, src2) vmulq_f32(src1, vrecp(src2)) +#endif +#define MS_MULQ_N_F32(src1, src2) vmulq_n_f32(src1, src2) +#define MS_MULQ_N_EPI32(src1, src2) vmulq_n_s32(src1, src2) +#define MS_DIVQ_N_F32(src1, src2) vdivq_n_f32(src1, src2) +#define MS_SLLIQ_EPI32(src1, src2) vshlq_s32(src1, vmovq_n_s32(src2)) +#define MS_CVTQPS_EPI32(src) vcvtq_s32_f32(src) +#define MS_CVTQEPI32_PS(src) vcvtq_f32_s32(src) +#define MS_CMPGTQ_F32(src1, src2) vcgtq_f32(src1, src2) +#define MS_CMPGTQ_EPI32(src1, src2) vcgtq_s32(src1, src2) +// Note: Compared with X86, the vbslq_f32 parameters are the opposite with _mm_blendv_f32 +#define MS_BLENDQ_F32(src1, src2, src3) vbslq_f32(src3, src2, src1) +#define MS_BLENDQ_EPI32(src1, src2, src3) vbslq_s32(src3, src2, src1) +#endif + +#if defined(ENABLE_AVX) +#define MS_FLOAT32X8 __m256 +#define MS_INT32X8 __m256i +#define MS_LD256_F32 _mm256_loadu_ps +#define MS_LD256_EPI32(src) _mm256_loadu_si256((__m256i const *)(src)) +#define MS_ADD256_F32 _mm256_add_ps +#define MS_ADD256_EPI32 _mm256_add_epi32 +#define MS_MOV256_F32 _mm256_set1_ps +#define MS_MOV256_EPI32 _mm256_set1_epi32 +#define MS_MLA256_F32(src1, src2, src3) _mm256_add_ps(src1, _mm256_mul_ps(src2, src3)) +#define MS_ST256_F32 _mm256_storeu_ps +#define MS_ST256_EPI32(src1, src2) _mm256_storeu_si256((__m256i *)(src1), src2) +#define MS_SUB256_F32 _mm256_sub_ps +#define MS_MAX256_F32 _mm256_max_ps +#define MS_MAX256_EPI32 _mm256_max_epi32 +#define MS_MIN256_F32 _mm256_min_ps +#define MS_MIN256_EPI32 _mm256_min_epi32 +#define MS_MUL256_F32(src1, src2) _mm256_mul_ps(src1, src2) +#define MS_MUL256_EPI32(src1, src2) _mm256_mul_epi32(src1, src2) +#define MS_DIV256_F32(src1, src2) _mm256_div_ps(src1, src2) +#define MS_MUL256_N_F32(src1, src2) _mm256_mul_ps(src1, _mm256_set1_ps(src2)) +#define MS_MUL256_N_EPI32(src1, src2) _mm256_mul_epi32(src1, _mm256_set1_epi32(src2)) +#define MS_DIV256_N_F32(src1, src2) _mm256_div_ps(src1, _mm256_set1_ps(src2)) +#define MS_SLLI256_EPI32(src1, src2) _mm256_slli_epi32(src1, src2) +#define MS_CVT256PS_EPI32(src) _mm256_cvttps_epi32(src) +#define MS_CVT256EPI32_PS(src) _mm256_cvtepi32_ps(src) // truncate float to int +#define MS_CMP256_F32(src1, src2, src3) _mm256_cmp_ps(src1, src2, src3) +#define MS_CMPGT256_EPI32(src1, src2) _mm256_cmpgt_epi32(src1, src2) +#define MS_BLEND256_F32(src1, src2, src3) 
_mm256_blendv_ps(src1, src2, src3) +#define MS_BLEND256_EPI32(src1, src2, src3) _mm256_blendv_epi8(src1, src2, src3) +#endif + +#if defined(ENABLE_SSE) +#define MS_FLOAT32X4 __m128 +#define MS_INT32X4 __m128i +#define MS_LDQ_F32 _mm_loadu_ps +#define MS_LDQ_EPI32(src) _mm_loadu_si128((__m128i const *)(src)) +#define MS_ADDQ_F32 _mm_add_ps +#define MS_ADDQ_EPI32 _mm_add_epi32 +#define MS_MOVQ_F32 _mm_set1_ps +#define MS_MOVQ_EPI32 _mm_set1_epi32 +#define MS_MLAQ_F32(src1, src2, src3) _mm_add_ps(src1, _mm_mul_ps(src2, src3)) +#define MS_STQ_F32 _mm_storeu_ps +#define MS_STQ_EPI32(src1, src2) _mm_storeu_si128((__m128i *)(src1), src2) +#define MS_SUBQ_F32 _mm_sub_ps +#define MS_MAXQ_F32 _mm_max_ps +#define MS_MAXQ_EPI32 _mm_max_epi32 +#define MS_MINQ_F32 _mm_min_ps +#define MS_MINQ_EPI32 _mm_min_epi32 +#define MS_MULQ_F32(src1, src2) _mm_mul_ps(src1, src2) +#define MS_MULQ_EPI32(src1, src2) _mm_mul_epi32(src1, src2) +#define MS_DIVQ_F32(src1, src2) _mm_div_ps(src1, src2) +#define MS_MULQ_N_F32(src1, src2) _mm_mul_ps(src1, _mm_set1_ps(src2)) +#define MS_MULQ_N_EPI32(src1, src2) _mm_mul_epi32(src1, _mm_set1_epi32(src2)) +#define MS_DIVQ_N_F32(src1, src2) _mm_div_ps(src1, _mm_set1_ps(src2)) +#define MS_SLLIQ_EPI32(src1, src2) _mm_slli_epi32(src1, src2) +#define MS_CVTQPS_EPI32(src) _mm_cvttps_epi32(src) // truncate float to int +#define MS_CVTQEPI32_PS(src) _mm_cvtepi32_ps(src) +#define MS_CMPGTQ_F32(src1, src2) _mm_cmpgt_ps(src1, src2) +#define MS_CMPGTQ_EPI32(src1, src2) _mm_cmpgt_epi32(src1, src2) +#define MS_BLENDQ_F32(src1, src2, src3) _mm_blendv_ps(src1, src2, src3) +#define MS_BLENDQ_EPI32(src1, src2, src3) _mm_blendv_epi8(src1, src2, src3) +#endif + +#define LOAD256X8_F32(src, input_ptr, num) \ + MS_FLOAT32X8 src##1 = MS_LD256_F32(input_ptr + 0 * num); \ + MS_FLOAT32X8 src##2 = MS_LD256_F32(input_ptr + 1 * num); \ + MS_FLOAT32X8 src##3 = MS_LD256_F32(input_ptr + 2 * num); \ + MS_FLOAT32X8 src##4 = MS_LD256_F32(input_ptr + 3 * num); \ + MS_FLOAT32X8 src##5 = MS_LD256_F32(input_ptr + 4 * num); \ + MS_FLOAT32X8 src##6 = MS_LD256_F32(input_ptr + 5 * num); \ + MS_FLOAT32X8 src##7 = MS_LD256_F32(input_ptr + 6 * num); \ + MS_FLOAT32X8 src##8 = MS_LD256_F32(input_ptr + 7 * num); + +#define STORE256X8_F32(output_ptr, num, dst) \ + MS_ST256_F32(output_ptr + 0 * num, dst##1); \ + MS_ST256_F32(output_ptr + 1 * num, dst##2); \ + MS_ST256_F32(output_ptr + 2 * num, dst##3); \ + MS_ST256_F32(output_ptr + 3 * num, dst##4); \ + MS_ST256_F32(output_ptr + 4 * num, dst##5); \ + MS_ST256_F32(output_ptr + 5 * num, dst##6); \ + MS_ST256_F32(output_ptr + 6 * num, dst##7); \ + MS_ST256_F32(output_ptr + 7 * num, dst##8); + +#define LOAD128X8_F32(src, input_ptr, num) \ + MS_FLOAT32X4 src##1 = MS_LDQ_F32(input_ptr + 0 * num); \ + MS_FLOAT32X4 src##2 = MS_LDQ_F32(input_ptr + 1 * num); \ + MS_FLOAT32X4 src##3 = MS_LDQ_F32(input_ptr + 2 * num); \ + MS_FLOAT32X4 src##4 = MS_LDQ_F32(input_ptr + 3 * num); \ + MS_FLOAT32X4 src##5 = MS_LDQ_F32(input_ptr + 4 * num); \ + MS_FLOAT32X4 src##6 = MS_LDQ_F32(input_ptr + 5 * num); \ + MS_FLOAT32X4 src##7 = MS_LDQ_F32(input_ptr + 6 * num); \ + MS_FLOAT32X4 src##8 = MS_LDQ_F32(input_ptr + 7 * num); + +#define STORE128X8_F32(output_ptr, num, dst) \ + MS_STQ_F32(output_ptr + 0 * num, dst##1); \ + MS_STQ_F32(output_ptr + 1 * num, dst##2); \ + MS_STQ_F32(output_ptr + 2 * num, dst##3); \ + MS_STQ_F32(output_ptr + 3 * num, dst##4); \ + MS_STQ_F32(output_ptr + 4 * num, dst##5); \ + MS_STQ_F32(output_ptr + 5 * num, dst##6); \ + MS_STQ_F32(output_ptr + 6 * num, dst##7); \ + 
MS_STQ_F32(output_ptr + 7 * num, dst##8); + +static inline MS_FLOAT32X4 MS_TANHX4_F32(MS_FLOAT32X4 src) { + static const float data[] = {378.0f, 17325.0f, 135135.0f, 28.0f, 3150.0f, 62370.0f}; + static const MS_FLOAT32X4 neg = {-1.0f, -1.0f, -1.0f, -1.0f}; + static const MS_FLOAT32X4 pos = {1.0f, 1.0f, 1.0f, 1.0f}; + MS_FLOAT32X4 square = src * src; + MS_FLOAT32X4 a = (((square + data[0]) * square + data[1]) * square + data[2]) * src; + MS_FLOAT32X4 b = ((data[3] * square + data[4]) * square + data[5]) * square + data[2]; + return MS_MINQ_F32(MS_MAXQ_F32(a / b, neg), pos); +} + +#ifdef ENABLE_AVX +static inline MS_FLOAT32X8 MS_TANHX8_F32(MS_FLOAT32X8 src) { + static const float data[] = {378.0f, 17325.0f, 135135.0f, 28.0f, 3150.0f, 62370.0f}; + static const MS_FLOAT32X8 neg = {-1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f}; + static const MS_FLOAT32X8 pos = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; + MS_FLOAT32X8 square = src * src; + MS_FLOAT32X8 a = (((square + data[0]) * square + data[1]) * square + data[2]) * src; + MS_FLOAT32X8 b = ((data[3] * square + data[4]) * square + data[5]) * square + data[2]; + return MS_MIN256_F32(MS_MAX256_F32(a / b, neg), pos); +} +#endif + +static inline MS_FLOAT32X4 MS_ERFX4_F32(MS_FLOAT32X4 src) { + MS_FLOAT32X4 dst; + dst[0] = erff(src[0]); + dst[1] = erff(src[1]); + dst[2] = erff(src[2]); + dst[3] = erff(src[3]); + return dst; +} + +#ifdef ENABLE_ARM64 +static inline float16x8_t MS_TANHX8_F16(float16x8_t src) { + float32x4_t src_low = vcvt_f32_f16(vget_low_f16(src)); + float32x4_t src_high = vcvt_f32_f16(vget_high_f16(src)); + return vcombine_f16(vcvt_f16_f32(MS_TANHX4_F32(src_low)), vcvt_f16_f32(MS_TANHX4_F32(src_high))); +} + +static inline float16x8_t MS_ERFX8_F16(float16x8_t src) { + float16x8_t dst; + dst[0] = erff(src[0]); + dst[1] = erff(src[1]); + dst[2] = erff(src[2]); + dst[3] = erff(src[3]); + dst[4] = erff(src[4]); + dst[5] = erff(src[5]); + dst[6] = erff(src[6]); + dst[7] = erff(src[7]); + return dst; +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/sse/sse_common.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/sse/sse_common.h new file mode 100755 index 00000000000..fd48d184c88 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/intrinsics/sse/sse_common.h @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
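MS_TANHX4_F32 and MS_TANHX8_F32 above evaluate a degree-7 over degree-6 rational approximation of tanh and clamp the quotient to [-1, 1]. The same computation written out in scalar form for clarity (the function name is illustrative):

#include <math.h>

/* Scalar transcription of the vector tanh approximation above. */
static float TanhApproxScalar(float x) {
  float x2 = x * x;
  float a = (((x2 + 378.0f) * x2 + 17325.0f) * x2 + 135135.0f) * x;
  float b = ((28.0f * x2 + 3150.0f) * x2 + 62370.0f) * x2 + 135135.0f;
  float r = a / b;
  return fminf(fmaxf(r, -1.0f), 1.0f);
}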
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ +#define MINDSPORE_LITE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void ActBlock1(__m128 *v1, size_t relu, size_t relu6); +void ActBlock2(__m128 *v1, __m128 *v2, size_t relu, size_t relu6); +void ActBlock4(__m128 *v1, __m128 *v2, __m128 *v3, __m128 *v4, size_t relu, size_t relu6); +void ActBlock8(__m128 *v1, __m128 *v2, __m128 *v3, __m128 *v4, __m128 *v5, __m128 *v6, __m128 *v7, __m128 *v8, + size_t relu_type); + +void WriteCol1(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol2(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int r); +void WriteCol2Opt(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int r); +void WriteCol3(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol4(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol5(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol6(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol7(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol8(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); + +void DoBiasBlock8(const float *bias_ptr, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, + __m128 *dst6, __m128 *dst7, __m128 *dst8); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/l2_norm_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/l2_norm_parameter.h new file mode 100755 index 00000000000..4343ef3f5ec --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/l2_norm_parameter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct L2NormParameter { + // Primitive parameter + OpParameter op_parameter_; + float epsilon_; + int axis_[MAX_SHAPE_SIZE]; + // shape correlative + size_t axis_num_; + int data_num_; + int *shape_; + size_t shape_num_; + // other parameter + ActType act_type_; +} L2NormParameter; + +typedef struct { + QuantArg in_; + QuantArg out_; +} L2NormQuantArg; + +#endif // MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/layer_norm_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/layer_norm_parameter.h new file mode 100755 index 00000000000..928662d9d7e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/layer_norm_parameter.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +enum ElementwiseMode { ELEMENTWISE_NOT = 0, ELEMENTWISE_PER_CHANNEL = 1, ELEMENTWISE_PER_NUM = 2 }; +typedef struct LayerNormParameter { + // Primitive parameter + OpParameter op_parameter_; + float epsilon_; + enum ElementwiseMode elementwise_mode_; + bool elementwise_affine_; + int begin_norm_axis_; + int begin_params_axis_; + // shape correlative + int norm_inner_size_; + int norm_outer_size_; + int params_inner_size_; + int params_outer_size_; + int normalized_dims_; + int normalized_shape_[MAX_SHAPE_SIZE]; + // other parameter + int thread_count_; + int thread_outsize_; +} LayerNormParameter; + +typedef struct LayerNormQuantArg { + int32_t in_zp_; + int32_t out_zp_; + double in_scale_; + double out_scale_; +} LayerNormQuantArg; + +#endif // MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lsh_projection_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lsh_projection_parameter.h new file mode 100755 index 00000000000..9275279b053 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lsh_projection_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
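LayerNormParameter views the input as norm_outer_size_ blocks of norm_inner_size_ elements and normalizes each block, with epsilon_ guarding the variance. A minimal scalar sketch under that reading; gamma and beta stand in for the optional elementwise-affine inputs and are not names from the patch:

#include <math.h>
#include <stddef.h>

/* Each outer block of inner_size elements is reduced to zero mean and unit
 * variance, then optionally scaled and shifted. Illustrative only. */
static void LayerNormReference(const float *src, const float *gamma, const float *beta,
                               float *dst, int outer_size, int inner_size, float epsilon) {
  for (int o = 0; o < outer_size; ++o) {
    const float *in = src + o * inner_size;
    float *out = dst + o * inner_size;
    float mean = 0.0f, var = 0.0f;
    for (int i = 0; i < inner_size; ++i) mean += in[i];
    mean /= (float)inner_size;
    for (int i = 0; i < inner_size; ++i) var += (in[i] - mean) * (in[i] - mean);
    var /= (float)inner_size;
    float inv_std = 1.0f / sqrtf(var + epsilon);
    for (int i = 0; i < inner_size; ++i) {
      float norm = (in[i] - mean) * inv_std;
      out[i] = (gamma != NULL && beta != NULL) ? norm * gamma[i] + beta[i] : norm;
    }
  }
}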
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_LSH_PROJECTION_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_LSH_PROJECTION_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct LshProjectionParameter { + // Primitive parameter + OpParameter op_parameter_; + // shape correlative + int hash_shape_[2]; + // other parameter + int lsh_type_; + int feature_num_; + char **hash_buffs_; + size_t hash_buff_size_; + int64_t thread_stride_; +} LshProjectionParameter; + +#endif // MINDSPORE_LITE_NNACL_LSH_PROJECTION_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lstm_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lstm_parameter.h new file mode 100755 index 00000000000..d29c880f87f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/lstm_parameter.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_LSTM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_LSTM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct LstmParameter { + // Primitive parameter + OpParameter op_parameter_; + // shape correlative + int input_size_; + int hidden_size_; // output_size + int seq_len_; + int batch_; + // other parameter + int input_step_; + int output_step_; + bool bidirectional_; + float zoneout_cell_; + float zoneout_hidden_; + int input_row_align_; + int input_col_align_; + int state_row_align_; + int state_col_align_; + int col_align_; + int row_align_; +} LstmParameter; + +#endif // MINDSPORE_LITE_NNACL_LSTM_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/matmul_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/matmul_parameter.h new file mode 100755 index 00000000000..9f9c023568e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/matmul_parameter.h @@ -0,0 +1,79 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_MATMUL_H_ +#define MINDSPORE_LITE_NNACL_MATMUL_H_ + +#include "nnacl/op_base.h" + +typedef void (*MATMUL_OPT_R4_FUNC)(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16, + const int *input_sum, const int *bias); + +typedef void (*MATMUL_OPT_R_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, + int32_t maxi, size_t per_channel); + +typedef void (*MATMUL_OPT_DP_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, + int32_t maxi, size_t per_channel, int *filter_zp); + +typedef enum OutType { OutType_C8 = 0, OutType_Nhwc = 1, OutType_TileC8 = 2 } OutType; + +typedef struct MatMulParameter { + // Primitive parameter + OpParameter op_parameter_; + bool has_bias_; + + // other parameter + int row_; + int col_; + int row_4_; + int row_6_; + int row_12_; + int row_16_; + int row_align_; + int col_4_; + int col_8_; + int col_align_; + int deep_; + int deep_4_; + int deep_16_; + int batch; + bool a_transpose_; /* false : row-major */ + bool b_transpose_; /* true : col-major */ + bool a_const_; + bool b_const_; + ActType act_type_; + bool use_axis_; + int axis_; +} MatMulParameter; + +typedef struct MatmulQuantParameter { + QuantArg input_; + QuantArg weight_; + QuantArg output_; + int32_t out_act_min_; + int32_t out_act_max_; + float *filter_scale_; + int32_t *filter_zp_; + int32_t *left_shift_; + int32_t *right_shift_; + int32_t *quant_multiplier_; +} MatmulQuantParameter; + +#endif // MINDSPORE_LITE_NNACL_MATMUL_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/mul_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/mul_parameter.h new file mode 100755 index 00000000000..7c2d8fe181a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/mul_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
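MatMulParameter describes C = A(row_ x deep_) * B(deep_ x col_), with a_transpose_ and b_transpose_ selecting the storage order noted in the struct comments. A naive float reference of that contract, for orientation only; the generated kernels use the tiled row_12_/col_8_ layouts:

#include <stddef.h>

/* C[r][n] = sum_d A[r][d] * B[d][n] + bias[n]; transpose flags switch the
 * indexing to the col-major storage described in MatMulParameter. */
static void MatMulReference(const float *a, const float *b, const float *bias, float *c,
                            int row, int col, int deep, int a_transpose, int b_transpose) {
  for (int r = 0; r < row; ++r) {
    for (int n = 0; n < col; ++n) {
      float acc = (bias != NULL) ? bias[n] : 0.0f;
      for (int d = 0; d < deep; ++d) {
        float av = a_transpose ? a[d * row + r] : a[r * deep + d];
        float bv = b_transpose ? b[n * deep + d] : b[d * col + n];
        acc += av * bv;
      }
      c[r * col + n] = acc;
    }
  }
}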
+ */ + +#ifndef MINDSPORE_LITE_NNACL_MUL_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_MUL_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct MulQuantArg { + QuantArg in_quant_args_[2]; + QuantArg out_quant_arg_; + int output_multiplier_; + int output_activation_min_; + int output_activation_max_; + int shift_left_; + int shift_right_; +} MulQuantArg; + +typedef struct MulParameter { + // Primitive parameter + OpParameter op_parameter_; + // other parameter + int thread_count_; + MulQuantArg mul_quant_arg_; +} MulParameter; + +#endif // MINDSPORE_LITE_NNACL_MUL_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_common.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_common.h new file mode 100755 index 00000000000..3e02fe89917 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_common.h @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_NNACL_COMMON_H_ +#define MINDSPORE_LITE_NNACL_NNACL_COMMON_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void ComputeStrides(const int *shape, int *strides, const int ndim) { + int stride = 1; + for (int i = ndim - 1; i >= 0; i--) { + strides[i] = stride; + stride *= shape[i]; + } +} + +static inline void ComputeAxisDims(const int *shape, int shape_size, int axis, int *out_count, int *axis_count, + int *in_count) { + *out_count = 1; + *in_count = 1; + for (int i = 0; i < shape_size; i++) { + if (i < axis) *out_count = (*out_count) * shape[i]; + if (i == axis) *axis_count = shape[axis]; + if (i > axis) *in_count = (*in_count) * shape[i]; + } +} + +static const unsigned int FP32_BIT_SIZE = 32; +static const unsigned int FP32_EXPONENT_BIAS = 127; +static const unsigned int FP32_SIGNIFICAND = 23; +static const unsigned int FP32_EXPONENT_MAX = 255; +static const unsigned int FP16_BIT_SIZE = 16; +static const unsigned int FP16_EXPONENT_BIAS = 15; +static const unsigned int FP16_SIGNIFICAND = 10; +static const int FP16_EXPONENT_MAX = 30; +static const int FP16_EXPONENT_MIN = -10; +float ShortToFloat32(uint16_t src_value); +uint16_t Float32ToShort(float src_value); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_NNACL_COMMON_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_utils.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_utils.h new file mode 100755 index 00000000000..735425d3bbc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/nnacl_utils.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
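A concrete use of ComputeStrides above: for a row-major 2x3x4 tensor it yields strides {12, 4, 1}, so element (i, j, k) sits at offset i*12 + j*4 + k.

#include "nnacl/nnacl_common.h"

void StridesExample(void) {
  int shape[3] = {2, 3, 4};
  int strides[3];
  ComputeStrides(shape, strides, 3);
  /* strides == {12, 4, 1}; for the same shape, ComputeAxisDims with axis = 1
   * gives out_count = 2, axis_count = 3, in_count = 4. */
}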
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_NNACL_UTILS_H_ +#define MINDSPORE_LITE_NNACL_NNACL_UTILS_H_ + +#include +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__arm__) || defined(__aarch64__) +uint32_t getHwCap(int hwcap_type); +#endif + +#ifdef DEBUG +#include +#define NNACL_ASSERT(f) assert(f) +#else +#define NNACL_ASSERT(f) ((void)0) +#endif + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_NNACL_UTILS_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/non_max_suppression_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/non_max_suppression_parameter.h new file mode 100755 index 00000000000..409dc9b3426 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/non_max_suppression_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct NMSParameter { + // Primitive parameter + OpParameter op_parameter_; + int center_point_box_; +} NMSParameter; + +#endif // MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/op_base.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/op_base.h new file mode 100755 index 00000000000..476fdb8534f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/op_base.h @@ -0,0 +1,107 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_OP_BASE_H_ +#define MINDSPORE_LITE_NNACL_OP_BASE_H_ + +#include +#include +#include +#include +#if defined(ENABLE_AVX) || defined(ENABLE_SSE) || defined(ENABLE_ARM) +#include "nnacl/intrinsics/ms_simd_instructions.h" +#endif + +#define C2NUM 2 +#define C4NUM 4 +#define C6NUM 6 +#define C8NUM 8 +#define C12NUM 12 +#define C16NUM 16 +#define TILE_NUM 8 + +#define MSMIN(x, y) ((x) < (y) ? (x) : (y)) +#define MSMAX(x, y) ((x) > (y) ? (x) : (y)) + +#define UP_DIV(x, y) (((x) + (y) - (1)) / (y)) +#define UP_ROUND(x, y) (((x) + (y) - (1)) / (y) * (y)) +#define UP_ROUND_DIV(x, y) (x % y == 0 ? (x / y) : (x / y) + 1) +#define DOWN_DIV(x, y) (((x) - (y) + (1)) / (y)) + +#define MSVALID(left, x, right) (MSMIN((MSMAX(left, x)), right)) + +#define COMM_SHAPE_SIZE 4 +#define MAX_SHAPE_SIZE 8 + +#define DIMENSION_4D 4 +#define DIMENSION_6D 6 +#define DIMENSION_7D 7 +#define kInputIndex 0 +#define kWeightIndex 1 +#define kBiasIndex 2 +#define kOutputIndex 0 +#define kNHWC_N 0 +#define kNHWC_H 1 +#define kNHWC_W 2 +#define kNHWC_C 3 +#define kInputSize1 2 +#define kInputSize2 3 +#define MAX_AXIS_SIZE 6 +#define MAX_LEN 256 +#define FLT16_MAX 65504 + +typedef enum LiteDataType { + kDataTypeFloat, + kDataTypeFloat16, + kDataTypeInt, + kDataTypeInt8, + KDataTypeBool, +} LiteDataType; + +typedef enum DataOrder { + RowMajor, + ColMajor, +} DataOrder; + +typedef struct OpParameter { + char name_[100]; + bool infer_flag_; + int type_; + int thread_num_; + int quant_type_; +} OpParameter; + +typedef struct QuantArg { + float scale_; + int32_t zp_; +} QuantArg; + +typedef struct QuantMulArg { + int32_t multiplier_; + int left_shift_; + int right_shift_; +} QuantMulArg; + +typedef enum ActType { ActType_No, ActType_Relu, ActType_Sigmod, ActType_Relu6, ActType_Prelu } ActType; +typedef enum PadMode { Pad_pad, Pad_same, Pad_valid } PadMode; +typedef enum RoundingMode { Rounding_No, Rounding_Away_from_zero, Rounding_Up } RoundingMode; +typedef enum CalFixedMultiplierMode { + Method_No, + Method_SinglePrecision, + Method_DoublePrecision +} CalFixedMultiplierMode; + +#endif // MINDSPORE_LITE_NNACL_OP_BASE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pack.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pack.h new file mode 100755 index 00000000000..f51ec8410a9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pack.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
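Worked values for the rounding and tiling helpers defined above, e.g. splitting 10 channels into C4NUM-wide blocks (the function name is illustrative):

#include <assert.h>
#include "nnacl/op_base.h"

void TilingMacroExample(void) {
  assert(UP_DIV(10, C4NUM) == 3);       /* 10 channels occupy 3 blocks of 4 */
  assert(UP_ROUND(10, C4NUM) == 12);    /* channel count padded to the block size */
  assert(UP_ROUND_DIV(12, C4NUM) == 3); /* exact multiples are not rounded up */
  assert(MSVALID(0, -3, 6) == 0);       /* clamp -3 into [0, 6] */
}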
+ */ + +#ifndef MINDSPORE_LITE_NNACL_PACK_H_ +#define MINDSPORE_LITE_NNACL_PACK_H_ + +#include "nnacl/fp32/pack_fp32.h" +#include "nnacl/int8/pack_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_PACK_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pad_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pad_parameter.h new file mode 100755 index 00000000000..fb9c7ae136c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pad_parameter.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_ + +#include "nnacl/op_base.h" + +#define MAX_PAD_SIZE 8 +#define DEFAULT_PAD_NDIMS 4 + +typedef struct PadQuantArg { + QuantArg *in_quant_args_; + QuantArg *out_quanr_args_; + int8_t *constant_value_; +} PadQuantArg; + +typedef struct PadParameter { + // Primitive parameter + OpParameter op_parameter_; + int paddings_[MAX_SHAPE_SIZE]; + int pad_mode_; + float constant_value_; + // shape correlative + int padding_length; + // other parameter + int in_strides[COMM_SHAPE_SIZE]; + int out_strides[DEFAULT_PAD_NDIMS]; + int mirror_offset_; + PadQuantArg pad_quant_arg_; +} PadParameter; + +typedef struct MirrorPadBlock { + int out_offset_; + int out_stride_[DEFAULT_PAD_NDIMS]; + int size_[DEFAULT_PAD_NDIMS]; +} MirrorPadBlock; + +#endif // MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pooling_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pooling_parameter.h new file mode 100755 index 00000000000..84f7e1b068f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/pooling_parameter.h @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_POOLING_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_POOLING_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef enum PoolMode { PoolMode_No, PoolMode_MaxPool, PoolMode_AvgPool } PoolMode; + +typedef enum RoundMode { RoundMode_No, RoundMode_Ceil, RoundMode_Floor } RoundMode; + +typedef struct PoolingParameter { + // Primitive parameter + OpParameter op_parameter_; + PoolMode pool_mode_; + RoundMode round_mode_; + PadMode pad_mode_; + ActType act_type_; + int avg_mode_; + bool global_; + int window_w_; + int window_h_; + int stride_w_; + int stride_h_; + // shape correlative + int input_w_; + int input_h_; + int input_batch_; + int input_channel_; + int output_w_; + int output_h_; + int output_batch_; + int output_channel_; + int pad_u_; + int pad_d_; + int pad_l_; + int pad_r_; + // other parameter + int thread_num_; + QuantArg **quant_args_; + bool quantize_; +} PoolingParameter; + +#endif // MINDSPORE_LITE_NNACL_POOLING_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/power_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/power_parameter.h new file mode 100755 index 00000000000..34f46a73e82 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/power_parameter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_POWER_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_POWER_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct PowerQuantArg { + QuantArg in_args_; + QuantArg exp_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} PowerQuantArg; + +typedef struct PowerParameter { + // Primitive parameter + OpParameter op_parameter_; + float power_; + float scale_; + float shift_; + // other parameter + PowerQuantArg quant_arg_; + bool broadcast_; +} PowerParameter; + +#endif // MINDSPORE_LITE_NNACL_POWER_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/predict_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/predict_parameter.h new file mode 100755 index 00000000000..a4e0fc857b2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/predict_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
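PowerParameter above describes the element-wise op out = (scale_ * x + shift_) ^ power_. A scalar sketch of the non-broadcast float path under that reading (the quantized fields are ignored and the name is illustrative):

#include <math.h>

static void PowerReference(const float *in, float *out, int num,
                           float power, float scale, float shift) {
  for (int i = 0; i < num; ++i) {
    out[i] = powf(scale * in[i] + shift, power);
  }
}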
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_PREDICT_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_PREDICT_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct { + // Primitive parameter + OpParameter op_parameter_; + // other parameter + int output_num; + float weight_threshold; +} PredictParameter; + +typedef struct { + int label; + float weight; +} LabelInfo; +#endif // MINDSPORE_LITE_NNACL_PREDICT_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prelu_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prelu_parameter.h new file mode 100755 index 00000000000..5e1087f7f7d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prelu_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_PRELU_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_PRELU_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct PReluParameter { + // Primitive parameter + OpParameter op_parameter_; + // other parameter + float *slope_; + bool channelShared; + int tile_block_; + int channel_num_; + int input_num_; +} PReluParameter; + +#endif // MINDSPORE_LITE_NNACL_PRELU_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prior_box_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prior_box_parameter.h new file mode 100755 index 00000000000..f258299b7ee --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/prior_box_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
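A minimal sketch of the forward pass PReluParameter implies: negative values are scaled by a per-channel slope, or by slope_[0] when channelShared is set. The channel-innermost (NHWC-style) layout is an assumption, and the function name is illustrative.

#include <stdbool.h>

static void PReluReference(const float *in, float *out, const float *slope,
                           int input_num, int channel_num, bool channel_shared) {
  for (int i = 0; i < input_num; ++i) {
    float s = channel_shared ? slope[0] : slope[i % channel_num];
    out[i] = (in[i] >= 0.0f) ? in[i] : in[i] * s;
  }
}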
+ */ +#ifndef MINDSPORE_LITE_NNACL_PRIOR_BOX_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_PRIOR_BOX_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct PriorBoxParameter { + // Primitive parameter + OpParameter op_parameter_; + int32_t min_sizes_size; + int32_t min_sizes[MAX_SHAPE_SIZE]; + int32_t max_sizes_size; + int32_t max_sizes[MAX_SHAPE_SIZE]; + int32_t aspect_ratios_size; + float aspect_ratios[MAX_SHAPE_SIZE]; + float variances[COMM_SHAPE_SIZE]; + int32_t image_size_w; + int32_t image_size_h; + float step_w; + float step_h; + bool clip; + bool flip; + float offset; +} PriorBoxParameter; + +#endif // MINDSPORE_LITE_NNACL_PRIOR_BOX_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/random_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/random_parameter.h new file mode 100755 index 00000000000..bf3473c16c5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/random_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RNADOM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_RNADOM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct RandomParam { + OpParameter op_parameter_; + int seed_; + int seed2_; +} RandomParam; + +#endif // MINDSPORE_LITE_NNACL_RNADOM_STANDARD_NORMAL_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reduce_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reduce_parameter.h new file mode 100755 index 00000000000..1e7ace41569 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reduce_parameter.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct ReduceParameter { + // primitive parameter + OpParameter op_parameter_; + int axes_[MAX_SHAPE_SIZE]; + bool keep_dims_; + int mode_; + bool reduce_to_end_; + float coeff; + + // other parameter + int num_axes_; +} ReduceParameter; + +#endif // MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reshape_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reshape_parameter.h new file mode 100755 index 00000000000..1e64e8a3f6a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reshape_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_RESHAHPE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_RESHAHPE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct ReshapeQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} ReshapeQuantArg; + +typedef struct ReshapeParameter { + // primitive parameter + OpParameter op_parameter_; + int shape_dim_; + int shape_[MAX_SHAPE_SIZE]; + + // other parameter + ReshapeQuantArg quant_para_; + int thread_count_; +} ReshapeParameter; + +#endif // MINDSPORE_LITE_NNACL_RESHAHPE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/resize_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/resize_parameter.h new file mode 100755 index 00000000000..5f7c486482e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/resize_parameter.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_RESIZE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_RESIZE_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct ResizeParameter { + // primitive parameter + OpParameter op_parameter_; + int method_; + int64_t new_height_; + int64_t new_width_; + int coordinate_transform_mode_; + float cubic_coeff_; + bool preserve_aspect_ratio_; +} ResizeParameter; + +typedef struct CropAndResizeParameter { + // primitive parameter + OpParameter op_parameter_; + int method_; + float extrapolation_value_; +} CropAndResizeParameter; +#endif // MINDSPORE_LITE_NNACL_RESIZE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reverse_sequence_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reverse_sequence_parameter.h new file mode 100755 index 00000000000..dd5ce36462c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/reverse_sequence_parameter.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct ReverseSequenceParameter { + // primitive parameter + OpParameter op_parameter_; + int seq_axis_; + int batch_axis_; + + // shape correlative + int input_shape0_[5]; + int output_shape_[5]; + int input_stride_[5]; + int output_stride_[5]; + + // other parameter + int ndim_; + int outer_count_; + int outer_stride_; + int inner_count_; + int inner_stride_; + int copy_byte_size_; + int total_data_size_; + bool is_seq_length_int32_; +} ReverseSequenceParameter; + +#endif // MINDSPORE_LITE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/scale.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/scale.h new file mode 100755 index 00000000000..dbca958234d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/scale.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SCALE_H_ +#define MINDSPORE_LITE_NNACL_SCALE_H_ + +#include "nnacl/op_base.h" + +typedef struct ScaleParameter { + // primitive parameter + OpParameter op_parameter_; + int axis_; + int activation_type_; + + // shape correlative + int outer_size_; + int axis_size_; + int inner_size_; + + // other parameter + bool const_scale_; + bool const_offset_; + QuantMulArg scale_mul_arg_; + QuantMulArg offset_mul_arg_; + int input_zp_; + int scale_zp_; + int offset_zp_; + int output_zp_; + int output_activation_min_; + int output_activation_max_; +} ScaleParameter; + +#endif // MINDSPORE_LITE_NNACL_SCALE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sigmoid_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sigmoid_parameter.h new file mode 100755 index 00000000000..f5cade2fb8a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sigmoid_parameter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_SIGMOID_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SIGMOID_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SigmoidParameter { + // primitive parameter + OpParameter op_parameter_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + + // other parameter + SigmoidQuantArg quant_arg; + double alpha_; + int thread_count_; + int64_t offset_[MAX_SHAPE_SIZE]; + int64_t in_offset_[MAX_SHAPE_SIZE]; + int64_t axis_; + int input_dim_; + int element_num; +} SigmoidParameter; + +#endif // MINDSPORE_LITE_NNACL_SIGMOID_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/skip_gram_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/skip_gram_parameter.h new file mode 100755 index 00000000000..26d3c28cbfb --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/skip_gram_parameter.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
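The outer_size_/axis_size_/inner_size_ fields in ScaleParameter above describe how the input is flattened around axis_. For an NHWC input of shape {1, 16, 16, 8} scaled along the channel axis, the decomposition is outer 256, axis 8, inner 1; the helper below shows that computation and is not itself part of the patch.

```c
#include "nnacl/scale.h"

/* Illustrative only: derive the flattened sizes used by the scale kernel. */
static void SetScaleSizes(ScaleParameter *param, const int *shape, int ndim, int axis) {
  param->axis_ = axis;
  param->outer_size_ = 1;
  param->inner_size_ = 1;
  for (int i = 0; i < axis; ++i) {
    param->outer_size_ *= shape[i];
  }
  param->axis_size_ = shape[axis];
  for (int i = axis + 1; i < ndim; ++i) {
    param->inner_size_ *= shape[i];
  }
}
/* e.g. int shape[4] = {1, 16, 16, 8}; SetScaleSizes(&param, shape, 4, 3); */
```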
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SKIP_GRAM_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SKIP_GRAM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SkipGramParameter { + // primitive parameter + OpParameter op_parameter_; + bool include_all_ngrams; + int max_skip_size; + int ngram_size; +} SkipGramParameter; + +#endif // MINDSPORE_LITE_NNACL_SKIP_GRAM_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/slice_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/slice_parameter.h new file mode 100755 index 00000000000..d3627965e5e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/slice_parameter.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_SLICE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SLICE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SliceQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} SliceQuantArg; + +typedef struct SliceParameter { + // primitive parameter + OpParameter op_parameter_; + + // shape correlative + int32_t shape_[COMM_SHAPE_SIZE]; + int32_t begin_[COMM_SHAPE_SIZE]; + int32_t end_[COMM_SHAPE_SIZE]; + int32_t size_[COMM_SHAPE_SIZE]; + int32_t axis_[COMM_SHAPE_SIZE]; + + // other parameter + SliceQuantArg quant_arg_; + int32_t param_length_; +} SliceParameter; + +#endif // MINDSPORE_LITE_NNACL_SLICE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/softmax_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/softmax_parameter.h new file mode 100755 index 00000000000..902ffb7485b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/softmax_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
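SliceParameter above keeps per-dimension begin/size values plus the derived end. A minimal sketch for cropping a 1x28x28x1 tensor to its central 24x24 window follows; the field names are from the header, the concrete numbers are illustrative, and the axis_ array is left untouched.

```c
#include <string.h>
#include "nnacl/slice_parameter.h"

/* Illustrative only: crop H and W by 2 pixels on each side. */
static void SetDemoSliceParameter(SliceParameter *param) {
  const int shape[4] = {1, 28, 28, 1};
  const int begin[4] = {0, 2, 2, 0};
  const int size[4] = {1, 24, 24, 1};
  memset(param, 0, sizeof(SliceParameter));
  param->param_length_ = 4;
  for (int i = 0; i < 4; ++i) {
    param->shape_[i] = shape[i];
    param->begin_[i] = begin[i];
    param->size_[i] = size[i];
    param->end_[i] = begin[i] + size[i];
  }
}
```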
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SoftmaxParameter { + // primitive parameter + OpParameter op_parameter_; + int32_t axis_; + + // shape correlative + int input_shape_[5]; + + // other parameter + int element_size_; + int n_dim_; +} SoftmaxParameter; + +#endif // MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/space_to_depth_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/space_to_depth_parameter.h new file mode 100755 index 00000000000..7afaca98e86 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/space_to_depth_parameter.h @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_SPACE_TO_DEPTH_PARAMETER_H_ +#define MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_SPACE_TO_DEPTH_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct SpaceToDepthParameter { + // primitive parameter + OpParameter op_parameter_; + int32_t block_size_; +} SpaceToDepthParameter; + +#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_SPACE_TO_DEPTH_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sparse_to_dense_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sparse_to_dense_parameter.h new file mode 100755 index 00000000000..41c0e22087a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/sparse_to_dense_parameter.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
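In SoftmaxParameter above, the shape-related fields duplicate information from the input tensor so the kernel does not have to recompute it. A sketch for a {1, 10} logits tensor follows; element_size_ is assumed here to be the total element count.

```c
#include <string.h>
#include "nnacl/softmax_parameter.h"

/* Illustrative only: softmax over the last axis of a 1x10 logits tensor. */
static void SetDemoSoftmaxParameter(SoftmaxParameter *param) {
  memset(param, 0, sizeof(SoftmaxParameter));
  param->axis_ = 1;
  param->n_dim_ = 2;
  param->input_shape_[0] = 1;
  param->input_shape_[1] = 10;
  param->element_size_ = 10; /* assumed: product of input_shape_ */
}
```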
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SparseToDenseParameter { + // primitive parameter + OpParameter op_parameter_; + bool validate_indices_; + + // other parameter + int thread_num_; +} SparseToDenseParameter; + +#endif // MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/splice_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/splice_parameter.h new file mode 100755 index 00000000000..8063960af7e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/splice_parameter.h @@ -0,0 +1,29 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ +#include "nnacl/op_base.h" +typedef struct SpliceParameter { + OpParameter op_parameter_; + int context_dim_; + int forward_indexes_dim_; + int src_to_dst_row_offset_; + int *context_; + int *forward_indexes_; + int output_dim_; +} SpliceParameter; +#endif // MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/split_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/split_parameter.h new file mode 100755 index 00000000000..7eeb4a6212c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/split_parameter.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SPLIT_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SPLIT_PARAMETER_H_ + +#include "nnacl/op_base.h" + +#define SPLIT_STRIDES_SIZE 32 + +typedef struct SplitQuantArg { + QuantArg in_args_; + QuantArg out_args_[20]; + int output_activation_min_; + int output_activation_max_; +} SplitQuantArg; + +typedef struct SplitParameter { + // primitive parameter + OpParameter op_parameter_; + int num_split_; + int *split_sizes_; + int split_dim_; + + // shape correlative + int strides_[SPLIT_STRIDES_SIZE]; + + // other parameter + SplitQuantArg quant_arg_; + int n_dims_; + int split_count_; +} SplitParameter; + +#endif // MINDSPORE_LITE_NNACL_SPLIT_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/squeeze_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/squeeze_parameter.h new file mode 100755 index 00000000000..77a419aa12d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/squeeze_parameter.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#define SQUEEZE_OFFSET_MAX_SIZE 4 + +typedef struct SqueezeQuantArg { + QuantArg *in_quant_args_; + QuantArg *out_quant_args_; +} SqueezeQuantArg; + +typedef struct SqueezeParameter { + // primitive parameter + OpParameter op_parameter_; + int axis_[8]; + size_t axis_size_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + int offset_size_; + int64_t offset_[SQUEEZE_OFFSET_MAX_SIZE]; + int64_t in_offset_[SQUEEZE_OFFSET_MAX_SIZE]; + int input_dim_; + // other parameter + SqueezeQuantArg quant_arg; +} SqueezeParameter; + +#endif // MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/stack_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/stack_parameter.h new file mode 100755 index 00000000000..1f714aad5a2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/stack_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
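SplitParameter above points at an external split_sizes_ array rather than embedding it, so the caller owns that storage. Below is a sketch that splits the channel axis of an NHWC {1, 16, 16, 8} tensor into two halves; strides_ and split_count_ are left for the kernel or generated code to fill, and the numbers are illustrative.

```c
#include <string.h>
#include "nnacl/split_parameter.h"

static int g_split_sizes[2] = {4, 4}; /* storage must outlive the parameter */

/* Illustrative only: split {1,16,16,8} along the channel axis into 4 + 4. */
static void SetDemoSplitParameter(SplitParameter *param) {
  memset(param, 0, sizeof(SplitParameter));
  param->num_split_ = 2;
  param->split_sizes_ = g_split_sizes;
  param->split_dim_ = 3;
  param->n_dims_ = 4;
}
```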
+ */ + +#ifndef MINDSPORE_LITE_NNACL_STACK_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_STACK_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct StackParameter { + // primitive parameter + OpParameter op_parameter_; + int32_t axis_; +} StackParameter; + +#endif // MINDSPORE_LITE_NNACL_STACK_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/strided_slice_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/strided_slice_parameter.h new file mode 100755 index 00000000000..91fa2d65713 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/strided_slice_parameter.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct StridedSliceParameter { + // primitive parameter + OpParameter op_parameter_; + int begins_[MAX_SHAPE_SIZE]; + int ends_[MAX_SHAPE_SIZE]; + int strides_[MAX_SHAPE_SIZE]; + int isScale; + + // shape correlative + int in_shape_length_; + int in_shape_[MAX_SHAPE_SIZE]; + + // other parameter + int num_axes_; + LiteDataType data_type; + int begins_mask_; + int ends_mask_; + int ellipsisMask_; + int newAxisMask_; + int shrinkAxisMask_; +} StridedSliceParameter; + +#endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensor_c.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensor_c.h new file mode 100755 index 00000000000..482bd361f6f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensor_c.h @@ -0,0 +1,29 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
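StridedSliceParameter above is the usual begin/end/stride triple plus the TensorFlow-style masks. The sketch below keeps every second column of a {4, 8} matrix; all masks stay zero and the values are illustrative.

```c
#include <string.h>
#include "nnacl/strided_slice_parameter.h"

/* Illustrative only: out[i][j] = in[i][2 * j] for a 4x8 input. */
static void SetDemoStridedSliceParameter(StridedSliceParameter *param) {
  memset(param, 0, sizeof(StridedSliceParameter));
  param->num_axes_ = 2;
  param->in_shape_length_ = 2;
  param->in_shape_[0] = 4;
  param->in_shape_[1] = 8;
  param->begins_[0] = 0;  param->ends_[0] = 4;  param->strides_[0] = 1;
  param->begins_[1] = 0;  param->ends_[1] = 8;  param->strides_[1] = 2;
}
```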
+ */ +#ifndef MINDSPORE_LITE_NNACL_TENSOR_C_H_ +#define MINDSPORE_LITE_NNACL_TENSOR_C_H_ +#include "nnacl/op_base.h" + +typedef struct TensorC { + bool is_ready_; + int data_type_; + int format_; + void *data_; + size_t shape_size_; + int shape_[MAX_SHAPE_SIZE]; +} TensorC; + +#endif // MINDSPORE_LITE_NNACL_TENSOR_C_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensorlist_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensorlist_parameter.h new file mode 100755 index 00000000000..0cf81569135 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/tensorlist_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct TensorListParameter { + // primitive parameter + OpParameter op_parameter_; + int shape_type_; + int element_dtype_; + + // other parameter + int num_element_; +} TensorListParameter; + +#endif // MINDSPORE_LITE_NNACL_ARG_TENSORLIST_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/transpose.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/transpose.h new file mode 100755 index 00000000000..b69ddabfb18 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/transpose.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
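TensorC above is the plain-C tensor descriptor the generated code passes to operators. The sketch below wraps a statically allocated float buffer; the numeric values used for data_type_ and format_ are assumptions standing in for the framework's TypeId and Format enumerators.

```c
#include <stdbool.h>
#include "nnacl/tensor_c.h"

static float g_input_buf[1 * 28 * 28 * 1];

/* Illustrative only: describe g_input_buf as a 1x28x28x1 float tensor. */
static void WrapDemoInput(TensorC *tensor) {
  tensor->is_ready_ = true;
  tensor->data_type_ = 43; /* assumed value of kNumberTypeFloat32 */
  tensor->format_ = 1;     /* assumed value of Format_NHWC */
  tensor->data_ = g_input_buf;
  tensor->shape_size_ = 4;
  tensor->shape_[0] = 1;
  tensor->shape_[1] = 28;
  tensor->shape_[2] = 28;
  tensor->shape_[3] = 1;
}
```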
+ */ + +#ifndef MINDSPORE_LITE_NNACL_TRANSPOSE_H_ +#define MINDSPORE_LITE_NNACL_TRANSPOSE_H_ + +#include "nnacl/op_base.h" + +#define MAX_TRANSPOSE_DIM_SIZE 6 + +typedef struct TransposeParameter { + // primitive parameter + OpParameter op_parameter_; + int perm_[MAX_SHAPE_SIZE]; + size_t perm_size_; + bool conjugate_; + + // shape correlative + int strides_[MAX_SHAPE_SIZE]; + int out_strides_[MAX_SHAPE_SIZE]; + + // other parameter + int num_axes_; + int data_size_; +} TransposeParameter; + +#endif // MINDSPORE_LITE_NNACL_TRANSPOSE_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unsqueeze_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unsqueeze_parameter.h new file mode 100755 index 00000000000..e543d27209e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unsqueeze_parameter.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct UnSqueezeQuantArg { + int *output_shape_; + float alpha; + int axis_; + size_t input_num_; + QuantArg in_quant_args_; + QuantArg out_quant_args_; +} UnSqueezeQuantArg; + +typedef struct UnSqueezeParameter { + // primitive parameter + OpParameter op_parameter_; + int dims_[COMM_SHAPE_SIZE]; + int num_dim_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + int64_t offset_[COMM_SHAPE_SIZE]; + int64_t axis_; + + // other parameter + UnSqueezeQuantArg quant_arg; + int thread_count_; +} UnSqueezeParameter; + +#endif // MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unstack_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unstack_parameter.h new file mode 100755 index 00000000000..09471839f61 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/unstack_parameter.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
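TransposeParameter above stores the permutation plus per-axis strides that the kernel precomputes from the concrete tensor shape. The sketch below sets up an NHWC to NCHW permutation; only perm_, perm_size_, num_axes_ and conjugate_ are filled here.

```c
#include <stdbool.h>
#include <string.h>
#include "nnacl/transpose.h"

/* Illustrative only: perm {0, 3, 1, 2} turns NHWC into NCHW. */
static void SetDemoTransposeParameter(TransposeParameter *param) {
  const int perm[4] = {0, 3, 1, 2};
  memset(param, 0, sizeof(TransposeParameter));
  param->perm_size_ = 4;
  param->num_axes_ = 4;
  param->conjugate_ = false;
  for (int i = 0; i < 4; ++i) {
    param->perm_[i] = perm[i];
  }
}
```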
+ */ + +#ifndef MINDSPORE_LITE_NNACL_UNSTACK_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_UNSTACK_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct UnstackParameter { + // primitive parameter + OpParameter op_parameter_; + int num_; + int axis_; + + // other parameter + int pre_dims_; + int axis_dim_; + int after_dims_; +} UnstackParameter; + +#endif // MINDSPORE_LITE_NNACL_UNSTACK_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/upsample_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/upsample_parameter.h new file mode 100755 index 00000000000..ab50e51c83f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/upsample_parameter.h @@ -0,0 +1,29 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct { + // primitive parameter + OpParameter op_parameter_; + + // other parameter + int method_; // 0 for bilinear; 1 for nearest +} UpsampleParameter; + +#endif // MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/where_parameter.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/where_parameter.h new file mode 100755 index 00000000000..9480d6e2fe2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/nnacl/where_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_WHERE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_WHERE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct WhereParameter { + // primitive parameter + OpParameter op_parameter_; + + // other parameter + int condition_num_; + int x_num_; + int y_num_; + int max_num_; + + int rank_; + int thread_num_; +} WhereParameter; + +#endif // MINDSPORE_LITE_NNACL_WHERE_PARAMETER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/common_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/common_wrapper.h new file mode 100755 index 00000000000..a68fdf8240b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/common_wrapper.h @@ -0,0 +1,24 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_COMMON_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_COMMON_WRAPPER_H_ + +#include "nnacl/op_base.h" + +bool GetSupportOptFlag(); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_COMMON_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/detection_post_process_base_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/detection_post_process_base_wrapper.h new file mode 100755 index 00000000000..badec6c0430 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/detection_post_process_base_wrapper.h @@ -0,0 +1,24 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_BASE_DETECTION_POST_PROCESS_BASE_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_BASE_DETECTION_POST_PROCESS_BASE_WRAPPER_H_ + +#include + +void PartialArgSort(const float *scores, int *indexes, int num_to_sort, int num_values); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_BASE_DETECTION_POST_PROCESS_BASE_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/optimize_handler_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/optimize_handler_wrapper.h new file mode 100755 index 00000000000..40e82acbaba --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/base/optimize_handler_wrapper.h @@ -0,0 +1,41 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_OPTIMIZE_HANDLER_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_OPTIMIZE_HANDLER_WRAPPER_H_ + +#include "nnacl/op_base.h" + +#ifdef ENABLE_ARM64 +void IndirectGemmInt8_optimize_handler(int8_t *dst, const int8_t *src, const int8_t *weight, const int32_t *bias, + size_t ksize, size_t ic4, size_t output_channel, size_t offset, + const int32_t *input_sum, size_t act_min, size_t act_max, size_t out_zp, + int32_t *out_multiplier, int32_t *shift_before, int32_t *shift_after, + size_t asymmetric, size_t per_channel, size_t per_channel_offset); +void MatMulR4Int8_optimize_handler(const int8_t *a, const int8_t *b, int *dst, int row4, int col4, int deep16, + const int *input_sum, const int *bias); + +void MatMulRInt8_optimize_handler(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, + int32_t maxi, size_t per_channel); +void MatMulDpInt8_optimize_handler(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, + int32_t maxi, size_t per_channel, int32_t *filter_zp); +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_OPTIMIZE_HANDLER_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/dequant_int8_to_fp32_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/dequant_int8_to_fp32_wrapper.h new file mode 100755 index 00000000000..ad581e0537e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/dequant_int8_to_fp32_wrapper.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + 
* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_DEQUANT_TO_INT8_FP32_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_DEQUANT_TO_INT8_FP32_WRAPPER_H_ +#include +#include +typedef struct DeQuantArg { + float scale; + int32_t zeroPoint; + float var_corr; + float mean_corr; + float *clusters; + int clusters_nums; + int bitNum; +} DeQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif + +void DequantDataPerChannel(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, + size_t per_batch_size, float *de_quant_dst); + +void DequantData(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, size_t channels, + float *de_quant_dst); + +void DequantDataPerTensor(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, + float *de_quant_dst); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_DEQUANT_TO_INT8_FP32_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/matmul_fp32_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/matmul_fp32_wrapper.h new file mode 100755 index 00000000000..89fba5e3a48 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/fp32/matmul_fp32_wrapper.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
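The dequant wrapper above converts int8 weights back to float using one DeQuantArg per channel, or a single one for per-tensor quantization. The call below is a hedged sketch: the DeQuantArg field names come from the header, but the meaning of de_quant_nums is assumed from its name, so check the wrapper implementation before relying on these semantics.

```c
#include <stddef.h>
#include "wrapper/fp32/dequant_int8_to_fp32_wrapper.h"

/* Illustrative only: per-tensor dequantization of a small weight blob.
 * de_quant_nums is assumed to be the number of DeQuantArg entries passed in. */
static void DemoDequant(const int8_t *quant_weight, float *fp32_weight) {
  DeQuantArg arg = {.scale = 0.0039f, .zeroPoint = 0, .var_corr = 1.0f,
                    .mean_corr = 0.0f, .clusters = NULL, .clusters_nums = 0, .bitNum = 8};
  const DeQuantArg *args[1] = {&arg};
  DequantDataPerTensor(quant_weight, args, 1, fp32_weight);
}
```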
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_MATMUL_FP32_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_MATMUL_FP32_WRAPPER_H_ +#include +#include "nnacl/fp32/matmul_fp32.h" +#ifdef __cplusplus +extern "C" { +#endif + +void InitMatrixA(const float *src_ptr, float *dst_ptr, const MatMulParameter *params_, bool is_vector_a); + +void InitMatrixB(const float *src_ptr, float *dst_ptr, const MatMulParameter *params_, bool is_vector_a); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_MATMUL_FP32_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/add_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/add_int8_wrapper.h new file mode 100755 index 00000000000..791ee2cf577 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/add_int8_wrapper.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ +#include +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/add_int8.h" +#include "nnacl/arithmetic.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + AddQuantParameter *para_; + ArithmeticParameter *arith_para_; + int in_size_; + int out_size_; + int thread_count_; + int elements_num_; + bool support_opt_add_; + int8_t *input0_data_; + int8_t *input1_data_; + int8_t *output_data_; +} AddInt8Args; + +int AddBroadcastInt8Run(void *cdata, int task_id); + +int AddInt8Run(void *cdata, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/batchnorm_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/batchnorm_int8_wrapper.h new file mode 100755 index 00000000000..a3b3c876373 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/batchnorm_int8_wrapper.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_BATCHNORM_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_BATCHNORM_INT8_WRAPPER_H_ + +#include +#include "nnacl/batchnorm_parameter.h" +typedef struct BatchNormArgs { + int8_t *in_addr_; + int8_t *out_addr_; + float *alpha_addr_; + float *beta_addr_; + BatchNormParameter *batchnorm_param_; +} BatchNormArgs; + +int BatchNormInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_BATCHNORM_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/concat_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/concat_int8_wrapper.h new file mode 100755 index 00000000000..f7c8fc4e200 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/concat_int8_wrapper.h @@ -0,0 +1,35 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_INT8_CONCAT_WRAPPER_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_INT8_CONCAT_WRAPPER_INT8_WRAPPER_H_ + +#include "nnacl/errorcode.h" +#include "nnacl/concat_parameter.h" +#include "nnacl/int8/concat_int8.h" + +typedef struct { + int8_t **inputs_; + int8_t *output_; + ConcatParameter *para_; + int axis_; + int64_t before_axis_size_; + int64_t count_unit_; +} ConcatInt8Args; + +int ConcatInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_INT8_CONCAT_WRAPPER_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_init_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_init_int8_wrapper.h new file mode 100755 index 00000000000..72380ba8921 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_init_int8_wrapper.h @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
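The int8 Run wrappers above (AddInt8Run, BatchNormInt8Run, ConcatInt8Run, and the ones that follow) all share the `int (*)(void *cdata, int task_id)` signature expected by the runtime's parallel launcher, where cdata points at the matching Args struct. On a single-core Cortex-M target there is no thread pool, so a generated model can simply iterate over the task ids; the RunSerial helper below sketches that fallback and is not part of this patch.

```c
/* Illustrative only: serial replacement for a parallel launch on bare-metal targets. */
static int RunSerial(int (*run)(void *cdata, int task_id), void *cdata, int thread_num) {
  for (int task_id = 0; task_id < thread_num; ++task_id) {
    int ret = run(cdata, task_id);
    if (ret != 0) {
      return ret; /* propagate the first nnacl error code */
    }
  }
  return 0;
}
/* usage sketch: RunSerial(ConcatInt8Run, &concat_args, 1); */
```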
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_INIT_INT8_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_INIT_INT8_H_ + +#include +#include +#include "nnacl/conv_parameter.h" + +int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int32_t input_channel, + int32_t output_channel, int32_t input_zp, bool support_optimize, bool filter_peroc, + int8_t **packed_weight, int32_t **bias_data); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_INIT_INT8_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_run_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_run_int8_wrapper.h new file mode 100755 index 00000000000..310bec30dd0 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv1x1_run_int8_wrapper.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_RUN_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_RUN_H_ + +#include +#include +#include "nnacl/conv_parameter.h" +#include "nnacl/matmul_parameter.h" + +typedef struct { + int32_t *input_sum_; /* per-oc */ + int32_t *filter_zp_ptr_; /* per-oc up round */ + int32_t *left_shift_; /* per-oc up round */ + int32_t *right_shift_; /* per-oc up round */ + int32_t *multiplier_; /* per-oc up round */ + int8_t *packed_weight_; + int32_t *bias_data_; + int8_t *packed_input_; + int8_t *input_ptr_; + int8_t *output_ptr_; + size_t thread_stride_hw_; + size_t thread_stride_oc_; + ConvParameter *conv_param_; + MatMulParameter *matmul_param_; + MATMUL_OPT_DP_FUNC matmul_func_; + bool pre_trans_input_; + bool support_optimize_; + bool filter_peroc_; +} Conv1x1Args; + +void Conv1x1Run(int8_t *src_in, Conv1x1Args *args, int8_t *src_out); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_RUN_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv_init_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv_init_int8_wrapper.h new file mode 100755 index 00000000000..819cad60f1a --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/conv_init_int8_wrapper.h @@ -0,0 +1,26 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
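Conv1x1Init above packs the raw int8 weights and prepares the bias before inference. The call below is a hedged sketch: the OC x IC weight layout, the per-tensor quantization setup, and the assumption that the function allocates packed_weight and bias_data for the caller are inferred rather than documented in this header.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "wrapper/int8/conv1x1_init_int8_wrapper.h"

/* Illustrative only: pack a 1x1 conv with 16 input and 8 output channels,
 * per-tensor quantization (filter_peroc = false), one shared filter zero point. */
static int DemoConv1x1Pack(int8_t *weight /* assumed 8 x 16 */, int32_t *bias /* 8 */) {
  int32_t filter_zp[1] = {0};
  int8_t *packed_weight = NULL;
  int32_t *bias_data = NULL;
  return Conv1x1Init(weight, bias, filter_zp, 16, 8, 0, false, false,
                     &packed_weight, &bias_data);
}
```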
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV_INIT_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV_INIT_H_ + +#include +#include + +int ConvInit(int8_t *origin_weight, const int32_t *ori_bias, const int32_t *filter_quant_zps, int kernel_h, + int kernel_w, int input_channel, int output_channel, int32_t input_zp, bool filter_peroc, + bool support_optimize, int8_t **packed_weight, int32_t **bias_data); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV_INIT_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_depthwise_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_depthwise_int8_wrapper.h new file mode 100755 index 00000000000..6df27d027fe --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_depthwise_int8_wrapper.h @@ -0,0 +1,35 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_DEPTHWISE_WRAPPER_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_DEPTHWISE_WRAPPER_INT8_WRAPPER_H_ + +#include "nnacl/errorcode.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/int8/conv_depthwise_int8.h" + +typedef struct { + int8_t *output_data_; + int32_t *row_buffer_; + const int8_t *input_data_; + const int16_t *weight_data_; + const int32_t *bias_data_; + const ConvParameter *conv_param_; +} ConvDepthwiseInt8Args; + +int ConvDepthwiseInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_DEPTHWISE_WRAPPER_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_int8_wrapper.h new file mode 100755 index 00000000000..082ccf41567 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/convolution_int8_wrapper.h @@ -0,0 +1,41 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_WRAPPER_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_WRAPPER_INT8_WRAPPER_H_ + +#include "nnacl/errorcode.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/int8/conv_int8.h" + +typedef struct { + int8_t *input_data_; + int8_t *packed_input_; + int8_t *matmul_input_; + int8_t *packed_weight_; + const int32_t *bias_data_; + int8_t *output_data_; + int32_t *filter_zp_; + int32_t *input_sum_; + ConvParameter *conv_param_; + MATMUL_OPT_R_FUNC matmul_func_; + bool is_optimize_; +} ConvolutionInt8Args; + +int ConvolutionInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_WRAPPER_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/matmul_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/matmul_int8_wrapper.h new file mode 100755 index 00000000000..72fab6a069c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/matmul_int8_wrapper.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_ +#include +#include "nnacl/int8/matmul_int8.h" +#ifdef __cplusplus +extern "C" { +#endif + +void InitInt8MatrixA(int8_t *src_ptr, int32_t *input_sums, int8_t *dst_ptr, int batch, int row, int deep, int input_zp, + const int *weight_zp, bool a_transpose); + +void InitInt8MatrixB(int8_t *weight_ptr, int32_t *weight_bias_sums_batch_, int8_t *dst_ptr, int batch, int deep, + int col, int col_align, int deep_16, int input_zp, int *weight_zp, const int *bias_ptr, + bool b_transpose, bool filter_per_channel); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/resize_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/resize_int8_wrapper.h new file mode 100755 index 00000000000..6721109e173 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/resize_int8_wrapper.h @@ -0,0 +1,41 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_WRAPPER_INT8_RESIZE_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_WRAPPER_INT8_RESIZE_INT8_WRAPPER_H_ + +#include "nnacl/int8/resize_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + const int8_t *input_data_; + int8_t *output_data_; + const int *input_shape_; + const int *output_shape_; + const bool align_corners_; + int thread_num_; +} ResizeInt8Args; + +int ResizeInt8Run(void *cdata, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_WRAPPER_INT8_RESIZE_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/slice_int8_wrapper.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/slice_int8_wrapper.h new file mode 100755 index 00000000000..0ecbc875cf4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/include/wrapper/int8/slice_int8_wrapper.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_SLICE_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_SLICE_INT8_WRAPPER_H_ + +#include +#include "nnacl/slice_parameter.h" + +typedef struct SliceArgs { + int8_t *input_data_; + int8_t *output_data_; + SliceParameter *param_; +} SliceArgs; + +int SliceInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_SLICE_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/CMakeLists.txt new file mode 100755 index 00000000000..d673e79316b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/CMakeLists.txt @@ -0,0 +1,19 @@ +cmake_minimum_required (VERSION 3.6) + +project(CMSISNNConvolutions) + +include(configLib) + +file(GLOB SRC "./*_*.c") + +add_library(CMSISNNConvolutions STATIC ${SRC}) + +configLib(CMSISNNConvolutions ${ROOT}) +configDsp(CMSISNNConvolutions ${ROOT}) + +### Includes +target_include_directories(CMSISNNConvolutions PUBLIC "${NN}/Include") +target_include_directories(CMSISNNConvolutions PUBLIC "${ROOT}/CMSIS/DSP/Include") + + + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1_x_n_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1_x_n_s8.c new file mode 100755 index 00000000000..277c206d351 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1_x_n_s8.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1_x_n_s8.c + * Description: s8 version of 1xN convolution using symmetric quantization. + * + * $Date: February 27, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * 1xN s8 convolution function. + * + * Refer header file for details. 
+ * + */ + +arm_status arm_convolve_1_x_n_s8(const q7_t *input, + const uint16_t input_x, + const uint16_t input_ch, + const uint16_t input_batches, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t kernel_x, + const uint16_t pad_x, + const uint16_t stride_x, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t input_offset, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint16_t output_x, + q15_t *buffer_a) +{ + (void)input_batches; + + arm_status status = ARM_MATH_SUCCESS; + if (output_x % 4 != 0) + { + status = ARM_MATH_SIZE_MISMATCH; + goto out; + } + +#if defined(ARM_MATH_MVEI) + for (int i_out_x = 0; i_out_x <= (output_x - 4); i_out_x += 4) + { + int32_t input_begin_idx[4]; + int32_t ker_begin_idx[4]; + int32_t ker_end_idx[4]; + + for (int i = 0; i < 4; i++) + { + const int32_t est_input_x_idx = stride_x * (i_out_x + i) - pad_x; + input_begin_idx[i] = MAX(0, est_input_x_idx); + ker_begin_idx[i] = MAX(0, -est_input_x_idx); + ker_end_idx[i] = MIN(kernel_x, input_x - est_input_x_idx); + } + + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32x4_t s_offset; + int32_t acc[4]; + if ((ker_begin_idx[0] != 0) || (ker_end_idx[3] != kernel_x)) + { + int32_t sum_row[4]; + + (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[0] - ker_begin_idx[0]) * input_ch, + input + input_begin_idx[0] * input_ch, + kernel + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[0] * input_ch), + &sum_row[0], + &acc[0]); + (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[1] - ker_begin_idx[1]) * input_ch, + input + input_begin_idx[1] * input_ch, + kernel + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[1] * input_ch), + &sum_row[1], + &acc[1]); + + (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[2] - ker_begin_idx[2]) * input_ch, + input + input_begin_idx[2] * input_ch, + kernel + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[2] * input_ch), + &sum_row[2], + &acc[2]); + + (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[3] - ker_begin_idx[3]) * input_ch, + input + input_begin_idx[3] * input_ch, + kernel + (input_ch * kernel_x * i_out_ch) + (ker_begin_idx[3] * input_ch), + &sum_row[3], + &acc[3]); + + s_offset = vldrwq_s32(sum_row); + } + else + { + int32_t sum_row; + (void)arm_nn_mat_mul_core_4x_s8(kernel_x * input_ch, + stride_x * input_ch, + input + input_begin_idx[0] * input_ch, + kernel + (input_ch * kernel_x * i_out_ch), + &sum_row, + acc); + + s_offset = vdupq_n_s32(sum_row); + } + int32x4_t res = vldrwq_s32(acc); + s_offset = vmulq_n_s32(s_offset, input_offset); + + res = vaddq_n_s32(res, bias[i_out_ch]); + res = vaddq_s32(res, s_offset); + res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); + res = vaddq_n_s32(res, out_offset); + + res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); + res = vminq_s32(res, vdupq_n_s32(out_activation_max)); + + const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; + vstrbq_scatter_offset_s32(output, scatter_offset, res); + output++; + } + output += (3 * output_ch); + } + +#else +#define DIM_Y (1) + status = arm_convolve_s8(input, input_x, DIM_Y, + input_ch, 1, kernel, output_ch, + kernel_x, DIM_Y, + pad_x, 0, + stride_x, DIM_Y, + bias, output, + output_shift, output_mult, + out_offset, input_offset, + out_activation_min, out_activation_max, + output_x, DIM_Y, + buffer_a); +#endif + +out: + /* Return to application */ + return status; +} + +int32_t 
arm_convolve_1_x_n_s8_get_buffer_size(const uint16_t input_ch, + const uint16_t kernel_x, + const uint16_t kernel_y) +{ +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + return (2 * input_ch * kernel_x * kernel_y) * sizeof(int16_t); +#else + (void)input_ch; + (void)kernel_x; + (void)kernel_y; + return 0; +#endif +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c new file mode 100755 index 00000000000..68f95f787dd --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c @@ -0,0 +1,236 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1x1_HWC_q7_fast_nonsquare.c + * Description: Fast Q7 version of 1x1 convolution (non-square shape) + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/** + * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimention x + * @param[in] dim_im_in_y input tensor dimention y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * This function is optimized for convolution with 1x1 kernel size (i.e., dim_kernel_x=1 + * and dim_kernel_y=1). 
It can be used for the second half of MobileNets [1] after depthwise + * separable convolution. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + * + * [1] MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications + * https://arxiv.org/abs/1704.04861 + */ + +arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + (void)dim_im_in_y; + int16_t i_out_y, i_out_x; + int16_t i_ch_out; + + /* ----------------------- + * Here we use bufferA as q15_t internally as computation are done with q15_t level + * im2col are done to output in q15_t format from q7_t input + */ + + q15_t *pBuffer = bufferA; + q7_t *pOut = Im_out; + + if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0 || dim_kernel_x != 1 || dim_kernel_y != 1 + || padding_x != 0 || padding_y != 0 || stride_x != 1 || stride_y != 1) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + /* This part implements the im2col function */ + arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_out_y * dim_im_in_x + i_out_x) * ch_im_in, pBuffer, + ch_im_in); + pBuffer += ch_im_in; + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in, bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* check if there is left-over for compute */ + if (pBuffer != bufferA) + { + const q7_t *pA = wt; + for (i_ch_out = 0; i_ch_out < ch_im_out; i_ch_out++) + { + q31_t sum = ((q31_t)(bias[i_ch_out]) << bias_shift) + NN_ROUND(out_shift); + const q15_t *pB = bufferA; + /* basically each time it process 4 entries */ + uint16_t colCnt = ch_im_in * dim_kernel_x * dim_kernel_y >> 2; + + while (colCnt) + { + + q31_t inA1, inA2; + q31_t inB1, inB2; + + pA = read_and_pad_reordered(pA, &inA1, &inA2); + + inB1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA1, inB1, sum); + inB2 = arm_nn_read_q15x2_ia(&pB); + + sum = __SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut = (q7_t) __SSAT((sum >> out_shift), 8); + pOut++; + + } + + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + + int i, j, k, l, m, n; + int conv_out; + int in_row, in_col; + + if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0 || dim_kernel_x != 1 || dim_kernel_y != 1 + || padding_x != 0 || padding_y != 0 || stride_x != 1 || stride_y != 1) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + for (i = 0; i < 
ch_im_out; i++) + { + for (j = 0; j < dim_im_out_y; j++) + { + for (k = 0; k < dim_im_out_x; k++) + { + conv_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel_y; m++) + { + for (n = 0; n < dim_kernel_x; n++) + { + // if-for implementation + in_row = stride_y * j + m - padding_y; + in_col = stride_x * k + n - padding_x; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + l] * + wt[i * ch_im_in * dim_kernel_y * dim_kernel_x + (m * dim_kernel_y + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c new file mode 100755 index 00000000000..4531bf3b63b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_1x1_s8_fast.c + * Description: Fast q7 version of 1x1 convolution (non-square shape) + * + * $Date: 7 February 2020 + * $Revision: V.1.0.2 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" + +#define DIM_KER_X (1U) +#define DIM_KER_Y (1U) + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Fast s8 version for 1x1 convolution (non-square shape) + * + * Refer header file for details. 
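+ *
+ * Usage sketch (hypothetical call site, not part of this patch; every name
+ * below is a placeholder). The kernel only accepts input_ch that is a
+ * multiple of 4 with zero padding and unit strides, and it needs no scratch
+ * buffer because arm_convolve_1x1_s8_fast_get_buffer_size() returns 0:
+ *
+ *   arm_status st =
+ *       arm_convolve_1x1_s8_fast(input, input_x, input_y, input_ch, 1,
+ *                                kernel, output_ch, 0, 0, 1, 1, bias, output,
+ *                                output_shift, output_mult, out_offset,
+ *                                input_offset, out_activation_min,
+ *                                out_activation_max, output_x, output_y,
+ *                                NULL);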
+ * + */ + +arm_status arm_convolve_1x1_s8_fast(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint16_t input_batches, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t input_offset, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint16_t output_x, + const uint16_t output_y, + q15_t *buffer_a) +{ + if (input_ch % 4 != 0 || + pad_x != 0 || pad_y != 0 || + stride_x != 1 || stride_y != 1) + { + return ARM_MATH_SIZE_MISMATCH; + } +#if defined(ARM_MATH_MVEI) + (void)buffer_a; + + int32_t col_len = input_x * input_y * input_batches; + + for (int i_items = 0; i_items <= (col_len - 4); i_items += 4) + { + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t sum_row = 0; + int32_t temp_out[4]; + + (void)arm_nn_mat_mul_core_4x_s8(input_ch, + input_ch, + input + i_items * input_ch, + kernel + i_out_ch * input_ch, + &sum_row, + temp_out); + int32x4_t res = vldrwq_s32(temp_out); + + res = vaddq_n_s32(res, bias[i_out_ch]); + sum_row = sum_row * input_offset; + res = vaddq_n_s32(res, sum_row); + res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); + res = vaddq_n_s32(res, out_offset); + + res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); + res = vminq_s32(res, vdupq_n_s32(out_activation_max)); + + const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; + vstrbq_scatter_offset_s32(output, scatter_offset, res); + output++; + } + output += (3 * output_ch); + } + + /* Handle left over elements */ + for (int i_items = (col_len & ~0x3); i_items < col_len; i_items++) + { + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t sum_row = 0; + + int32_t acc; + (void)arm_nn_mat_mul_core_1x_s8(input_ch, + input + i_items * input_ch, + kernel + i_out_ch * input_ch, + &sum_row, + &acc); + + acc += bias[i_out_ch]; + sum_row = (sum_row * input_offset); + acc += sum_row; + acc = arm_nn_requantize(acc, output_mult[i_out_ch], output_shift[i_out_ch]); + acc += out_offset; + + acc = MAX(acc, out_activation_min); + acc = MIN(acc, out_activation_max); + *output++ = acc; + } + } + +#else + /* Run the following code as reference implementation for Cortex-M processors with or without DSP extension */ + + (void)input_x; + (void)input_y; + (void)output_x; + (void)output_y; + (void)buffer_a; + + const int32_t lhs_rows = input_x * input_y * input_batches; + const int32_t rhs_rows = output_ch; + const int32_t rhs_cols = input_ch; + + arm_nn_mat_mult_nt_t_s8(input, + kernel, + bias, + output, + output_mult, + output_shift, + lhs_rows, + rhs_rows, + rhs_cols, + input_offset, + out_offset, + out_activation_min, + out_activation_max); + +#endif + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + + +int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const uint16_t input_ch) +{ + (void)input_ch; + return 0; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c new file mode 100755 index 00000000000..427df47836b --- /dev/null +++ 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q15_basic.c + * Description: Q15 version of convolution + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + + /** + * @brief Basic Q15 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * bufferA size: ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * This basic version is designed to work for any input tensor and weight + * dimension. 
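+ *
+ * @par Example
+ * A hypothetical call site (the upper-case sizes and all variable names are
+ * placeholders, not values used elsewhere in this patch); bufferA holds
+ * ch_im_in*dim_kernel*dim_kernel q15 entries and bufferB is unused, so NULL
+ * is passed:
+ * @code
+ *   q15_t buffer_a[CH_IM_IN * DIM_KERNEL * DIM_KERNEL];
+ *   arm_status st =
+ *       arm_convolve_HWC_q15_basic(im_in, DIM_IM_IN, CH_IM_IN, wt, CH_IM_OUT,
+ *                                  DIM_KERNEL, PADDING, STRIDE, bias,
+ *                                  BIAS_SHIFT, OUT_SHIFT, im_out, DIM_IM_OUT,
+ *                                  buffer_a, NULL);
+ * @endcode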
+ */ + +arm_status +arm_convolve_HWC_q15_basic(const q15_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + uint16_t im2col_out_pixel_index = 0; + q15_t *pBuffer = bufferA; + q15_t *pOut = Im_out; + q15_t *im_buffer = bufferA; + const q15_t *pA; + int i; + + /* This part implements the im2col function */ + for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* Filling 0 for out-of-bound paddings */ + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + /* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ + memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in); + } + pBuffer += ch_im_in; + } + } + + pA = wt; + for (i = 0; i < ch_im_out; i++) + { + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + const q15_t *pB = im_buffer; + uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 2; + while (colCnt) + { + q31_t inA1 = arm_nn_read_q15x2_ia(&pA); + q31_t inB1 = arm_nn_read_q15x2_ia(&pB); + q31_t inA2 = arm_nn_read_q15x2_ia(&pA); + q31_t inB2 = arm_nn_read_q15x2_ia(&pB); + + sum = __SMLAD(inA1, inB1, sum); + sum = __SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = ch_im_in * dim_kernel * dim_kernel & 0x3; + while (colCnt) + { + q15_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut = (q15_t) __SSAT((sum >> out_shift), 16); + pOut++; + } + + /* counter reset */ + pBuffer = im_buffer; + im2col_out_pixel_index++; + } + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out; j++) + { + for (k = 0; k < dim_im_out; k++) + { + conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel; m++) + { + for (n = 0; n < dim_kernel; n++) + { + in_row = stride * j + m - padding; + in_col = stride * k + n - padding; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in + in_col) * ch_im_in + + l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c new file mode 100755 index 00000000000..799e168fd01 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q15_fast.c + * Description: Fast Q15 version of convolution + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + + /** + * @brief Fast Q15 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
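+ *
+ * @par Example
+ * A hypothetical call site (the upper-case sizes and all variable names are
+ * placeholders, not values used elsewhere in this patch); the buffer sizing
+ * and the channel-count constraints are listed under @details below:
+ * @code
+ *   q15_t buffer_a[2 * CH_IM_IN * DIM_KERNEL * DIM_KERNEL];
+ *   arm_status st =
+ *       arm_convolve_HWC_q15_fast(im_in, DIM_IM_IN, CH_IM_IN, wt, CH_IM_OUT,
+ *                                 DIM_KERNEL, PADDING, STRIDE, bias,
+ *                                 BIAS_SHIFT, OUT_SHIFT, im_out, DIM_IM_OUT,
+ *                                 buffer_a, NULL);
+ * @endcode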
+ * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * Input dimension constraints: + * + * ch_im_in is multiple of 2 + * + * ch_im_out is multipe of 2 + * + */ + +arm_status +arm_convolve_HWC_q15_fast(const q15_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + q15_t *pBuffer = bufferA; + q15_t *im_buffer = bufferA; + q15_t *pOut = Im_out; + + if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + /* This part implements the im2col function */ + for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + /* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ + memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (i_out_x & 0x1) + { + int i; + /* initialize the matrix pointers for A */ + const q15_t *pA = wt; + + /* set up the second output pointers */ + q15_t *pOut2 = pOut + ch_im_out; + + /* this loop over rows in A */ + for (i = 0; i < ch_im_out; i += 2) + { + /* setup pointers for B */ + const q15_t *pB = im_buffer; + const q15_t *pB2 = pB + ch_im_in * dim_kernel * dim_kernel; + + /* aling the second pointer for A */ + const q15_t *pA2 = pA + ch_im_in * dim_kernel * dim_kernel; + + /* init the sum with bias */ + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 1; + /* accumulate over the vector */ + while (colCnt) + { + q31_t inA1 = arm_nn_read_q15x2_ia(&pA); + q31_t inB1 = arm_nn_read_q15x2_ia(&pB); + q31_t inA2 = arm_nn_read_q15x2_ia(&pA2); + q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); + + sum = __SMLAD(inA1, inB1, sum); + sum2 = __SMLAD(inA1, inB2, sum2); + sum3 = __SMLAD(inA2, inB1, sum3); + sum4 = __SMLAD(inA2, inB2, sum4); + + colCnt--; + } /* while over colCnt */ + colCnt = ch_im_in * dim_kernel * dim_kernel & 0x1; + while (colCnt) + { + q15_t inA1 = *pA++; + q15_t inB1 = *pB++; + q15_t inA2 = *pA2++; + q15_t inB2 = *pB2++; + + sum += inA1 * inB1; + sum2 += inA1 * inB2; + sum3 += inA2 * inB1; + sum4 += inA2 * inB2; + colCnt--; + } /* while over colCnt */ + *pOut++ = (q15_t) __SSAT(sum >> out_shift, 16); + *pOut++ = (q15_t) __SSAT(sum3 >> out_shift, 16); + *pOut2++ = (q15_t) __SSAT(sum2 
>> out_shift, 16); + *pOut2++ = (q15_t) __SSAT(sum4 >> out_shift, 16); + + /* skip the row computed with A2 */ + pA += ch_im_in * dim_kernel * dim_kernel; + } /* for over ch_im_out */ + + pOut += ch_im_out; + /* counter reset */ + pBuffer = im_buffer; + } + } + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out; j++) + { + for (k = 0; k < dim_im_out; k++) + { + conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel; m++) + { + for (n = 0; n < dim_kernel; n++) + { + in_row = stride * j + m - padding; + in_col = stride * k + n - padding; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in + in_col) * ch_im_in + + l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c new file mode 100755 index 00000000000..c8cd48aa3f6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q15_fast.c + * Description: Fast Q15 version of convolution + * + * $Date: 24. 
May 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + + /** + * @brief Fast Q15 convolution function (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimention x + * @param[in] dim_im_in_y input tensor dimention y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * Input dimension constraints: + * + * ch_im_in is multiple of 2 + * + * ch_im_out is multipe of 2 + * + */ + +arm_status +arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q15_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q15_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + q15_t *pBuffer = bufferA; + q15_t *im_buffer = bufferA; + q15_t *pOut = Im_out; + + if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + /* This part implements the im2col function */ + for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + /* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ + memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if 
(i_out_x & 0x1) + { + int i; + /* initialize the matrix pointers for A */ + const q15_t *pA = wt; + + /* set up the second output pointers */ + q15_t *pOut2 = pOut + ch_im_out; + + /* this loop over rows in A */ + for (i = 0; i < ch_im_out; i += 2) + { + /* setup pointers for B */ + const q15_t *pB = im_buffer; + const q15_t *pB2 = pB + ch_im_in * dim_kernel_y * dim_kernel_x; + + /* aling the second pointer for A */ + const q15_t *pA2 = pA + ch_im_in * dim_kernel_y * dim_kernel_x; + + /* init the sum with bias */ + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = ch_im_in * dim_kernel_y * dim_kernel_x >> 1; + /* accumulate over the vector */ + while (colCnt) + { + q31_t inA1 = arm_nn_read_q15x2_ia(&pA); + q31_t inB1 = arm_nn_read_q15x2_ia(&pB); + q31_t inA2 = arm_nn_read_q15x2_ia(&pA2); + q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); + + sum = __SMLAD(inA1, inB1, sum); + sum2 = __SMLAD(inA1, inB2, sum2); + sum3 = __SMLAD(inA2, inB1, sum3); + sum4 = __SMLAD(inA2, inB2, sum4); + + colCnt--; + } /* while over colCnt */ + colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x1; + while (colCnt) + { + q15_t inA1 = *pA++; + q15_t inB1 = *pB++; + q15_t inA2 = *pA2++; + q15_t inB2 = *pB2++; + + sum += inA1 * inB1; + sum2 += inA1 * inB2; + sum3 += inA2 * inB1; + sum4 += inA2 * inB2; + colCnt--; + } /* while over colCnt */ + *pOut++ = (q15_t) __SSAT(sum >> out_shift, 16); + *pOut++ = (q15_t) __SSAT(sum3 >> out_shift, 16); + *pOut2++ = (q15_t) __SSAT(sum2 >> out_shift, 16); + *pOut2++ = (q15_t) __SSAT(sum4 >> out_shift, 16); + + /* skip the row computed with A2 */ + pA += ch_im_in * dim_kernel_y * dim_kernel_x; + } /* for over ch_im_out */ + + pOut += ch_im_out; + /* counter reset */ + pBuffer = im_buffer; + } + } + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out_y; j++) + { + for (k = 0; k < dim_im_out_x; k++) + { + conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel_y; m++) + { + for (n = 0; n < dim_kernel_x; n++) + { + in_row = stride_y * j + m - padding_y; + in_col = stride_x * k + n - padding_x; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + + l] * wt[i * ch_im_in * dim_kernel_x * dim_kernel_y + (m * dim_kernel_x + + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c new file mode 100755 index 00000000000..8d6a2eb913c --- /dev/null +++ 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q7_RGB.c + * Description: Q7 version of convolution for RGB image + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + + /** + * @brief Q7 convolution function for RGB image + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * Input dimension constraints: + * + * ch_im_in equals 3 + * + * This kernel is written exclusively for convolution with ch_im_in + * equals 3. This applies on the first layer of CNNs which has input + * image with RGB format. 
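+ *
+ * @par Example
+ * A hypothetical first-layer call (the upper-case sizes and all variable
+ * names are placeholders, not values used elsewhere in this patch);
+ * ch_im_in must be 3, bufferA holds 2*3*dim_kernel*dim_kernel q15 entries,
+ * and bufferB is unused, so NULL is passed:
+ * @code
+ *   q15_t buffer_a[2 * 3 * DIM_KERNEL * DIM_KERNEL];
+ *   arm_status st =
+ *       arm_convolve_HWC_q7_RGB(im_in, DIM_IM_IN, 3, wt, CH_IM_OUT,
+ *                               DIM_KERNEL, PADDING, STRIDE, bias,
+ *                               BIAS_SHIFT, OUT_SHIFT, im_out, DIM_IM_OUT,
+ *                               buffer_a, NULL);
+ * @endcode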
+ */ + +arm_status +arm_convolve_HWC_q7_RGB(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, const uint16_t dim_im_out, q15_t * bufferA, q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + /* + * Here we use bufferA as q15_t internally as computation are done with q15_t level + * im2col are done to output in q15_t format from q7_t input + */ + q15_t *pBuffer = bufferA; + q7_t *pOut = Im_out; + + // check if number of input channels is 3 + if (ch_im_in != 3) + { + return ARM_MATH_SIZE_MISMATCH; + } + // This part implements the im2col function + for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* Equivalent to arm_fill_q15(0, pBuffer, ch_im_in) with assumption: ch_im_in = 3 */ + *__SIMD32(pBuffer) = 0x0; + *(pBuffer + 2) = 0; + pBuffer += 3; + } else + { + /* + * Equivalent to: + * arm_q7_to_q15_no_shift( (q7_t*)Im_in+(i_ker_y*dim_im_in+i_ker_x)*3, pBuffer, 3); + */ + + const q7_t *pPixel = Im_in + (i_ker_y * dim_im_in + i_ker_x) * 3; + q31_t buf = arm_nn_read_q7x4(pPixel); + + union arm_nnword top; + union arm_nnword bottom; + + top.word = __SXTB16(buf); + bottom.word = __SXTB16(__ROR(buf, 8)); + +#ifndef ARM_MATH_BIG_ENDIAN + /* + * little-endian, | omit | 3rd | 2nd | 1st | + * MSB LSB + * top | 3rd | 1st |; bottom | omit | 2nd | + * + * version 1, need to swap 2nd and 3rd weight + * *__SIMD32(pBuffer) = top.word; + * *(pBuffer+2) = bottom.half_words[0]; + * + * version 2, no weight shuffling required + */ + *pBuffer++ = top.half_words[0]; + *__SIMD32(pBuffer) = __PKHBT(bottom.word, top.word, 0); +#else + /* + * big-endian, | 1st | 2nd | 3rd | omit | + * MSB LSB + * top | 2nd | omit |; bottom | 1st | 3rd | + * + * version 1, need to swap 2nd and 3rd weight + * *__SIMD32(pBuffer) = bottom.word; + * *(pBuffer+2) = top.half_words[1]; + * + * version 2, no weight shuffling required + */ + *pBuffer++ = bottom.half_words[0]; + *__SIMD32(pBuffer) = __PKHTB(top.word, bottom.word, 0); +#endif + pBuffer += 2; + } + } + } + + if (pBuffer == bufferA + 2 * 3 * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15(wt, bufferA, + ch_im_out, + 3 * dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* left-over because odd number of output pixels */ + if (pBuffer != bufferA) + { + const q7_t *pA = wt; + int i; + + for (i = 0; i < ch_im_out; i++) + { + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + q15_t *pB = bufferA; + /* basically each time it process 4 entries */ + uint16_t colCnt = 3 * dim_kernel * dim_kernel >> 2; + + while (colCnt) + { + + q31_t inA1, inA2; + q31_t inB1, inB2; + + pA = read_and_pad(pA, &inA1, &inA2); + + inB1 = arm_nn_read_q15x2_ia((const q15_t **)&pB); + sum = __SMLAD(inA1, inB1, sum); + inB2 = arm_nn_read_q15x2_ia((const q15_t **)&pB); + sum = 
__SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = 3 * dim_kernel * dim_kernel & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + } + } +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + // check if number of input channels is 3 + if (ch_im_in != 3) + { + return ARM_MATH_SIZE_MISMATCH; + } + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out; j++) + { + for (k = 0; k < dim_im_out; k++) + { + conv_out = (bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel; m++) + { + for (n = 0; n < dim_kernel; n++) + { + /* if-for implementation */ + in_row = stride * j + m - padding; + in_col = stride * k + n - padding; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in + in_col) * ch_im_in + + l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return (ARM_MATH_SUCCESS); +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c new file mode 100755 index 00000000000..3b12986a91f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c @@ -0,0 +1,231 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q7_basic.c + * Description: Q7 version of convolution + * + * $Date: 17. 
January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + + /** + * @brief Basic Q7 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * This basic version is designed to work for any input tensor and weight + * dimension. + */ + +arm_status +arm_convolve_HWC_q7_basic(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + /* + * Here we use bufferA as q15_t internally as computation are done with q15_t level + * im2col are done to output in q15_t format from q7_t input + */ + q15_t *pBuffer = bufferA; + q7_t *pOut = Im_out; + + /* This part implements the im2col function */ + for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* Filling 0 for out-of-bound paddings */ + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + /* Copying the pixel data to column */ + arm_q7_to_q15_no_shift((q7_t *) + Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + /* Computation is filed for every 2 columns */ + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15(wt, bufferA, + ch_im_out, + ch_im_in * + dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* left-over because odd number of output pixels */ + if (pBuffer != bufferA) + { + const q7_t *pA = wt; + int i; + + for (i = 0; i < ch_im_out; i++) + { + /* Load the accumulator with bias first */ + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + + /* Point to the 
beging of the im2col buffer */ + const q15_t *pB = bufferA; + + /* Each time it process 4 entries */ + uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 2; + + while (colCnt) + { + q31_t inA1, inA2; + q31_t inB1, inB2; + + pA = read_and_pad(pA, &inA1, &inA2); + + inB1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA1, inB1, sum); + inB2 = arm_nn_read_q15x2_ia(&pB); + + sum = __SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = ch_im_in * dim_kernel * dim_kernel & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + } + } +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out; j++) + { + for (k = 0; k < dim_im_out; k++) + { + conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel; m++) + { + for (n = 0; n < dim_kernel; n++) + { + // if-for implementation + in_row = stride * j + m - padding; + in_col = stride * k + n - padding; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in + in_col) * ch_im_in + + l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c new file mode 100755 index 00000000000..46ae7fdc88c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c @@ -0,0 +1,229 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q7_basic.c + * Description: Q7 version of convolution + * + * $Date: 13. 
July 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + + /** + * @brief Basic Q7 convolution function (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimention x + * @param[in] dim_im_in_y input tensor dimention y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_MATH_SUCCESS + */ + +arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + /* + * Here we use bufferA as q15_t internally as computation are done with q15_t level + * im2col are done to output in q15_t format from q7_t input + */ + q15_t *pBuffer = bufferA; + q7_t *pOut = Im_out; + + /* This part implements the im2col function */ + for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) + { + /* Filling 0 for out-of-bound paddings */ + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + /* Copying the pixel data to column */ + arm_q7_to_q15_no_shift((q7_t *) + Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + /* Computation is filed for every 2 columns */ + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_y * dim_kernel_x) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15(wt, bufferA, + ch_im_out, + ch_im_in * + dim_kernel_y * dim_kernel_x, bias_shift, out_shift, bias, pOut); + + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* left-over because odd 
number of output pixels */ + if (pBuffer != bufferA) + { + const q7_t *pA = wt; + int i; + + for (i = 0; i < ch_im_out; i++) + { + /* Load the accumulator with bias first */ + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + + /* Point to the beging of the im2col buffer */ + const q15_t *pB = bufferA; + + /* Each time it process 4 entries */ + uint16_t colCnt = ch_im_in * dim_kernel_y * dim_kernel_x >> 2; + + while (colCnt) + { + q31_t inA1, inA2; + q31_t inB1, inB2; + + pA = read_and_pad(pA, &inA1, &inA2); + + inB1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA1, inB1, sum); + inB2 = arm_nn_read_q15x2_ia(&pB); + + sum = __SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + } + } +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out_y; j++) + { + for (k = 0; k < dim_im_out_x; k++) + { + conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel_y; m++) + { + for (n = 0; n < dim_kernel_x; n++) + { + // if-for implementation + in_row = stride_y * j + m - padding_y; + in_col = stride_x * k + n - padding_x; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + l] * + wt[i * ch_im_in * dim_kernel_y * dim_kernel_x + + (m * dim_kernel_x + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c new file mode 100755 index 00000000000..47f042d73a8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c @@ -0,0 +1,408 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q7_fast.c + * Description: Fast Q7 version of convolution + * + * $Date: 17. 
January 2018
+ * $Revision:    V.1.0.0
+ *
+ * Target Processor:  Cortex-M cores
+ *
+ * -------------------------------------------------------------------- */
+
+#include "arm_math.h"
+#include "arm_nnfunctions.h"
+
+/**
+ * @ingroup groupNN
+ */
+
+/**
+ * @addtogroup NNConv
+ * @{
+ */
+
+ /**
+ * @brief Fast Q7 convolution function
+ * @param[in]       Im_in       pointer to input tensor
+ * @param[in]       dim_im_in   input tensor dimension
+ * @param[in]       ch_im_in    number of input tensor channels
+ * @param[in]       wt          pointer to kernel weights
+ * @param[in]       ch_im_out   number of filters, i.e., output tensor channels
+ * @param[in]       dim_kernel  filter kernel size
+ * @param[in]       padding     padding sizes
+ * @param[in]       stride      convolution stride
+ * @param[in]       bias        pointer to bias
+ * @param[in]       bias_shift  amount of left-shift for bias
+ * @param[in]       out_shift   amount of right-shift for output
+ * @param[in,out]   Im_out      pointer to output tensor
+ * @param[in]       dim_im_out  output tensor dimension
+ * @param[in,out]   bufferA     pointer to buffer space for input
+ * @param[in,out]   bufferB     pointer to buffer space for output
+ * @return     The function returns either
+ * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
+ *
+ * @details
+ *
+ * Buffer size:
+ *
+ * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel
+ *
+ * bufferB size: 0
+ *
+ * Input dimension constraints:
+ *
+ * ch_im_in is a multiple of 4 (because of the SIMD32 read and swap)
+ *
+ * ch_im_out is a multiple of 2 (because of the 2x2 mat_mult kernel)
+ *
+ * The im2col converts the Q7 tensor input into a Q15 column, which is stored in
+ * bufferA. There is reordering happening during this im2col process with
+ * arm_q7_to_q15_reordered_no_shift. For every four elements, the second and
+ * third elements are swapped.
+ *
+ * The computation kernel arm_nn_mat_mult_kernel_q7_q15_reordered does the
+ * GEMM computation with the reordered columns.
+ *
+ * To speed up the determination of the padding condition, we split the
+ * computation into 3x3 parts, i.e., {top, mid, bottom} X {left, mid, right}.
+ * This reduces the total number of boundary condition checks and improves
+ * the data copying performance.
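+ *
+ * As a worked example of the buffer-size rule above (hypothetical shapes,
+ * not part of this patch): with ch_im_in = 8 and dim_kernel = 3, bufferA
+ * must hold 2 * 8 * 3 * 3 = 144 q15_t entries. A minimal calling sketch
+ * under those assumptions, with example bias_shift/out_shift values, might
+ * look like:
+ *
+ *   static q7_t  wt[16 * 8 * 3 * 3];
+ *   static q7_t  bias[16];
+ *   static q7_t  im_in[32 * 32 * 8];
+ *   static q7_t  im_out[32 * 32 * 16];
+ *   static q15_t bufferA[2 * 8 * 3 * 3];
+ *
+ *   arm_status s = arm_convolve_HWC_q7_fast(im_in, 32, 8, wt, 16, 3, 1, 1,
+ *                                           bias, 0, 9, im_out, 32,
+ *                                           bufferA, NULL);
+ *
+ * Here ch_im_in = 8 satisfies the multiple-of-4 constraint and
+ * ch_im_out = 16 satisfies the multiple-of-2 constraint.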
+ */ + +arm_status +arm_convolve_HWC_q7_fast(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + /* + * Here we use bufferA as q15_t internally as computation are done with q15_t level + * im2col are done to output in q15_t format from q7_t input + */ + + q15_t *pBuffer = bufferA; + q7_t *pOut = Im_out; + + if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + /* + * Here we split the entire matrix into three regions depending on the padding situation + * Top: i_out_y from 0 to padding - 1 + * Middle: i_out_y from padding to dim_im_out-padding-1 + * Bottom: i_out_y from dim_im_out-padding to dim_im_out-1 + */ + + /* top part */ + for (i_out_y = 0; i_out_y < padding; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift + ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, + bufferA, + ch_im_out, + ch_im_in + * + dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* middle part, here we also divide the x into left, mid and right */ + for (; i_out_y < dim_im_out - padding; i_out_y++) + { + + /* left part */ + for (i_out_x = 0; i_out_x < padding; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift + ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, + bufferA, + ch_im_out, + ch_im_in + * + dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + + /* mid part */ + for (; i_out_x < dim_im_out - padding; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + arm_q7_to_q15_reordered_no_shift((q7_t *) 
Im_in + + + (i_ker_y * + dim_im_in + + i_out_x * + stride - padding) * ch_im_in, pBuffer, ch_im_in * dim_kernel); + pBuffer += ch_im_in * dim_kernel; + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, + bufferA, + ch_im_out, + ch_im_in + * + dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + + /* right part */ + for (; i_out_x < dim_im_out; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift + ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, + bufferA, + ch_im_out, + ch_im_in + * + dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + for (; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift + ((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel * dim_kernel) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, + bufferA, + ch_im_out, + ch_im_in + * + dim_kernel * dim_kernel, bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* check if there is left-over for compute */ + if (pBuffer != bufferA) + { + const q7_t *pA = wt; + int i; + + for (i = 0; i < ch_im_out; i++) + { + q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift); + const q15_t *pB = bufferA; + /* each time it process 4 entries */ + uint16_t colCnt = ch_im_in * dim_kernel * dim_kernel >> 2; + + while (colCnt) + { + + q31_t inA1, inA2; + q31_t inB1, inB2; + + pA = read_and_pad_reordered(pA, &inA1, &inA2); + + inB1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA1, inB1, sum); + inB2 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = ch_im_in * dim_kernel * dim_kernel & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut = (q7_t) __SSAT((sum >> out_shift), 8); + pOut++; + + } + + } +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + + uint16_t i, j, k, l, m, n; + int conv_out; + signed char in_row, in_col; + + if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + for (i = 0; i 
< ch_im_out; i++) + { + for (j = 0; j < dim_im_out; j++) + { + for (k = 0; k < dim_im_out; k++) + { + conv_out = (bias[i] << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel; m++) + { + for (n = 0; n < dim_kernel; n++) + { + // if-for implementation + in_row = stride * j + m - padding; + in_col = stride * k + n - padding; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += + Im_in[(in_row * dim_im_in + in_col) * ch_im_in + + l] * wt[i * ch_im_in * dim_kernel * dim_kernel + (m * dim_kernel + + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c new file mode 100755 index 00000000000..2dc94ef9cea --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c @@ -0,0 +1,379 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_HWC_q7_fast_nonsquare.c + * Description: Fast Q7 version of convolution (non-sqaure shape) + * + * $Date: 17. 
January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/** + * @brief Fast Q7 convolution function (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimention x + * @param[in] dim_im_in_y input tensor dimention y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + */ + +arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + /* ----------------------- + * Here we use bufferA as q15_t internally as computation are done with q15_t level + * im2col are done to output in q15_t format from q7_t input + */ + + q15_t *pBuffer = bufferA; + q7_t *pOut = Im_out; + + if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + /* + * Here we split the entire matrix into three regions depending on the padding situation + * Top: i_out_y from 0 to padding - 1 + * Middle: i_out_y from padding to dim_im_out-padding-1 + * Bottom: i_out_y from dim_im_out-padding to dim_im_out-1 + */ + + /* top part */ + for (i_out_y = 0; i_out_y < padding_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; + i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; + i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) 
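+                    /* Kernel taps that fall outside the input tensor belong to the zero
+                       padding, so the corresponding im2col column entries are zero-filled. */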
+ { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, + pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, + bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* middle part, here we also divide the x into left, mid and right */ + for (; i_out_y < dim_im_out_y - padding_y; i_out_y++) + { + + /* left part */ + for (i_out_x = 0; i_out_x < padding_x; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; + i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; + i_ker_x++) + { + if (i_ker_x < 0 || i_ker_x >= dim_im_in_x) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, + pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, + bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + + /* mid part */ + for (; i_out_x < dim_im_out_x - padding_x; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; + i_ker_y++) + { + arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + + (i_ker_y * dim_im_in_x + i_out_x * stride_x - padding_x) * ch_im_in, + pBuffer, ch_im_in * dim_kernel_x); + pBuffer += ch_im_in * dim_kernel_x; + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, + bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + + /* right part */ + for (; i_out_x < dim_im_out_x; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; + i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; + i_ker_x++) + { + if (i_ker_x < 0 || i_ker_x >= dim_im_in_x) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, + pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, + bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + for (; i_out_y < dim_im_out_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + /* This part implements the im2col function */ + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y 
* stride_y - padding_y + dim_kernel_y; + i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; + i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) + { + /* arm_fill_q15(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, sizeof(q15_t)*ch_im_in); + } else + { + arm_q7_to_q15_reordered_no_shift((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, + pBuffer, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + if (pBuffer == bufferA + 2 * ch_im_in * dim_kernel_x * dim_kernel_y) + { + pOut = + arm_nn_mat_mult_kernel_q7_q15_reordered(wt, bufferA, ch_im_out, ch_im_in * dim_kernel_x * dim_kernel_y, + bias_shift, out_shift, bias, pOut); + /* counter reset */ + pBuffer = bufferA; + } + } + } + + /* check if there is left-over for compute */ + if (pBuffer != bufferA) + { + const q7_t *pA = wt; + int i; + for (i = 0; i < ch_im_out; i++) + { + q31_t sum = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + const q15_t *pB = bufferA; + /* basically each time it process 4 entries */ + uint16_t colCnt = ch_im_in * dim_kernel_x * dim_kernel_y >> 2; + + while (colCnt) + { + + q31_t inA1, inA2; + q31_t inB1, inB2; + + pA = read_and_pad_reordered(pA, &inA1, &inA2); + + inB1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA1, inB1, sum); + inB2 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inA2, inB2, sum); + + colCnt--; + } + colCnt = (ch_im_in * dim_kernel_y * dim_kernel_x) & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + sum += inA1 * inB1; + colCnt--; + } + *pOut = (q7_t) __SSAT((sum >> out_shift), 8); + pOut++; + + } + + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + int i, j, k, l, m, n; + int conv_out; + int in_row, in_col; + + if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) + { + /* check if the input dimension meets the constraints */ + return ARM_MATH_SIZE_MISMATCH; + } + + for (i = 0; i < ch_im_out; i++) + { + for (j = 0; j < dim_im_out_y; j++) + { + for (k = 0; k < dim_im_out_x; k++) + { + conv_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + for (m = 0; m < dim_kernel_y; m++) + { + for (n = 0; n < dim_kernel_x; n++) + { + /* if-for implementation */ + in_row = stride_y * j + m - padding_y; + in_col = stride_x * k + n - padding_x; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) + { + for (l = 0; l < ch_im_in; l++) + { + conv_out += Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + l] * + wt[i * ch_im_in * dim_kernel_y * dim_kernel_x + (m * dim_kernel_x + n) * ch_im_in + l]; + } + } + } + } + Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_s8.c new file mode 100755 index 00000000000..61cf15caf85 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_convolve_s8.c @@ -0,0 +1,359 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_s8.c + * Description: s8 version of convolution using symmetric quantization. + * + * $Date: March 1, 2020 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s8 convolution function. + * + * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels + * are multiples of 4 or atleast greater than 4. + * + */ + +arm_status arm_convolve_s8(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint16_t input_batches, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t input_offset, + const int32_t out_activation_min, + const int32_t out_activation_max, + const uint16_t output_x, + const uint16_t output_y, + q15_t *buffer_a) +{ + int i_batch; + for (i_batch = 0; i_batch < input_batches; i_batch++) + { + input += i_batch * (input_x * input_y * input_ch); + output += i_batch * (output_x * output_y * output_ch); +#if defined(ARM_MATH_MVEI) + /* Generate upto four columns from the input tensor a GEMM computation */ + q7_t *im2col_buf = (q7_t *)buffer_a; + q7_t *out = output; + int32_t buffer_fill_cnt = 0; + int32_t padded = 0; + const int32_t num_elem = kernel_x * kernel_y * input_ch; + + /* This part implements the im2col function */ + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + for (int i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++) + { + for (int i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + memset(im2col_buf, (int8_t)-input_offset, sizeof(q7_t) * input_ch); + padded = 1; + } + else + { + arm_memcpy_q7(im2col_buf, input + (i_ker_y * input_x + i_ker_x) * input_ch, input_ch); + } + im2col_buf += input_ch; + } + } + + buffer_fill_cnt++; + + /* Computation is filed for every 4 columns */ + if (buffer_fill_cnt == 4 && (padded == 0)) + { + buffer_fill_cnt = 0; + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t sum_row; + int32_t acc[4]; + + (void)arm_nn_mat_mul_core_4x_s8(num_elem, + num_elem, + (q7_t *)buffer_a, + kernel + num_elem * 
i_out_ch, + &sum_row, + acc); + int32x4_t s_offset = vdupq_n_s32(sum_row); + + int32x4_t res = vldrwq_s32(acc); + s_offset = vmulq_n_s32(s_offset, input_offset); + + res = vaddq_n_s32(res, bias[i_out_ch]); + res = vaddq_s32(res, s_offset); + res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); + res = vaddq_n_s32(res, out_offset); + + res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); + res = vminq_s32(res, vdupq_n_s32(out_activation_max)); + + const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; + vstrbq_scatter_offset_s32(out, scatter_offset, res); + out++; + } + out += (3 * output_ch); + im2col_buf = (q7_t *)buffer_a; + } + else if (buffer_fill_cnt == 4 && (padded != 0)) + { + buffer_fill_cnt = 0; + out = arm_nn_mat_mult_s8(kernel, + (q7_t *)buffer_a, + output_ch, + 4, + output_shift, + output_mult, + out_offset, + input_offset, + 0, + out_activation_min, + out_activation_max, + num_elem, + bias, + out); + + im2col_buf = (q7_t *)buffer_a; + padded = 0; + } + } + } + /* Handle left over columns */ + if (buffer_fill_cnt != 0) + { + out = arm_nn_mat_mult_s8(kernel, + (q7_t *)buffer_a, + output_ch, + buffer_fill_cnt, + output_shift, + output_mult, + out_offset, + input_offset, + 0, + out_activation_min, + out_activation_max, + num_elem, + bias, + out); + } + +#elif defined(ARM_MATH_DSP) + int32_t i_out_y, i_out_x, i_ker_y, i_ker_x; + + /* Generate two columns from the input tensor a GEMM computation */ + q15_t *two_column_buf = buffer_a; + q7_t *out = output; + + /* This part implements the im2col function */ + for (i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < output_x; i_out_x++) + { + for (i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + /* Filling 0 for out-of-bound paddings */ + memset(two_column_buf, 0, sizeof(q15_t) * input_ch); + } + else + { + /* Copying the pixel data to column */ + arm_q7_to_q15_with_offset(input + (i_ker_y * input_x + i_ker_x) * input_ch, two_column_buf, input_ch, input_offset); + } + two_column_buf += input_ch; + } + } + + /* Computation is filed for every 2 columns */ + if (two_column_buf == buffer_a + 2 * input_ch * kernel_y * kernel_x) + { + out = + arm_nn_mat_mult_kernel_s8_s16(kernel, + buffer_a, + output_ch, + output_shift, + output_mult, + out_offset, + out_activation_min, + out_activation_max, + input_ch * kernel_y * kernel_x, + bias, + out); + + /* counter reset */ + two_column_buf = buffer_a; + } + } + } + + /* left-over because odd number of output pixels */ + if (two_column_buf != buffer_a) + { + const q7_t *ker_a = kernel; + int i; + + for (i = 0; i < output_ch; i++) + { + /* Load the accumulator with bias first */ + q31_t sum = bias[i]; + + /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */ + const q15_t *ip_as_col = buffer_a; + + /* 4 multiply and accumulates are done in one loop. 
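+         read_and_pad() expands four q7 kernel values into two 32-bit words that
+         each hold a pair of q15 values, and every __SMLAD call accumulates two
+         16 x 16 products, so one pass over the loop body performs four MACs.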
*/ + uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2; + + while (col_count) + { + q31_t ker_a1, ker_a2; + q31_t ip_b1, ip_b2; + + ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2); + + ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = __SMLAD(ker_a1, ip_b1, sum); + ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = __SMLAD(ker_a2, ip_b2, sum); + + col_count--; + } + /* Handle left over mac */ + col_count = input_ch * kernel_y * kernel_x & 0x3; + while (col_count) + { + q7_t ker_a1 = *ker_a++; + q15_t ip_b1 = *ip_as_col++; + sum += ker_a1 * ip_b1; + col_count--; + } + + sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]); + sum += out_offset; + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + *out++ = (q7_t)sum; + } + } +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + (void)buffer_a; + int32_t i_out_ch, i_out_y, i_out_x, i_input_ch, i_ker_y, i_ker_x; + int32_t conv_out; + + for (i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + for (i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < output_x; i_out_x++) + { + conv_out = bias[i_out_ch]; + + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + + const int32_t ker_y_start = MAX(0, -base_idx_y); + const int32_t ker_x_start = MAX(0, -base_idx_x); + + const int32_t ker_y_end = MIN(kernel_y, input_y - base_idx_y); + const int32_t ker_x_end = MIN(kernel_x, input_x - base_idx_x); + + for (i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + for (i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t in_row = base_idx_y + i_ker_y; + const int32_t in_col = base_idx_x + i_ker_x; + for (i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + conv_out += + (input[(in_row * input_x + in_col) * input_ch + i_input_ch] + input_offset) * + kernel[i_out_ch * input_ch * kernel_y * kernel_x + + (i_ker_y * kernel_x + i_ker_x) * input_ch + i_input_ch]; + } + } + } + conv_out = arm_nn_requantize(conv_out, output_mult[i_out_ch], output_shift[i_out_ch]); + conv_out += out_offset; + conv_out = MAX(conv_out, out_activation_min); + conv_out = MIN(conv_out, out_activation_max); + output[i_out_ch + (i_out_y * output_x + i_out_x) * output_ch] = (int8_t)conv_out; + } + } + } +#endif + } + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +int32_t arm_convolve_s8_get_buffer_size(const uint16_t input_ch, + const uint16_t kernel_x, + const uint16_t kernel_y) +{ +#if defined(ARM_MATH_DSP) + return (2 * input_ch * kernel_x * kernel_y) * sizeof(int16_t); +#else + (void)input_ch; + (void)kernel_x; + (void)kernel_y; + return 0; +#endif +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c new file mode 100755 index 00000000000..723b797a7de --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_3x3_s8.c + * Description: Optimized s8 depthwise convolution function for channel + * multiplier of 1 and 3x3 kernel size. + * + * $Date: February 26, 2020 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnsupportfunctions.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s8 depthwise convolution function with constraint that + * in_channel == out_channel and kernel_x == kernel_y == 3 with pads at most 1 + * + * Refer prototype header file for details. + * + */ +arm_status arm_depthwise_conv_3x3_s8(const int8_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t dilation_x, + const int32_t dilation_y, + int16_t *buffer_a) +{ + + /* Check input constraints input_ch == output_ch */ + if (input_ch != output_ch) + { + return ARM_MATH_SIZE_MISMATCH; + } + /* Check input constraints pad_x <= 1 */ + if(pad_x > 1) + { + return ARM_MATH_ARGUMENT_ERROR; + } + + (void)dilation_x; + (void)dilation_y; + (void)buffer_a; + + for(int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) + { + for(int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) + { + int32_t in_ch = 0; + int32_t ker_w_start = MAX(0, -in_w); + + for(; in_ch <= (input_ch - 4); in_ch += 4) + { + int32_t out_buff0 = bias[in_ch + 0]; + int32_t out_buff1 = bias[in_ch + 1]; + int32_t out_buff2 = bias[in_ch + 2]; + int32_t out_buff3 = bias[in_ch + 3]; + + const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; + const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; + + for(int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) + { + int32_t in_val = 0; + int32_t ker_val = 0; + + if(ker_w_start == 0) + { + in_val = arm_nn_read_q7x4(input_ptr); + ker_val = arm_nn_read_q7x4(kernel_ptr); + + out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; + out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); + out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); + out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); + } + + in_val = arm_nn_read_q7x4(input_ptr + input_ch); + ker_val = arm_nn_read_q7x4(kernel_ptr + input_ch); + + out_buff0 += ((int8_t)in_val + 
input_offset) * (int8_t)ker_val; + out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); + out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); + out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); + + if((input_x - in_w) >= 3) + { + in_val = arm_nn_read_q7x4(input_ptr + (input_ch << 1)); + ker_val = arm_nn_read_q7x4(kernel_ptr + (input_ch << 1)); + + out_buff0 += ((int8_t)in_val + input_offset) * (int8_t)ker_val; + out_buff1 += ((int8_t)(in_val >> 8) + input_offset) * (int8_t)(ker_val >> 8); + out_buff2 += ((int8_t)(in_val >> 16) + input_offset) * (int8_t)(ker_val >> 16); + out_buff3 += ((int8_t)(in_val >> 24) + input_offset) * (int8_t)(ker_val >> 24); + } + + input_ptr += (input_ch * input_x); + kernel_ptr += (input_ch * 3); + } + + out_buff0 = arm_nn_requantize(out_buff0, output_mult[in_ch + 0], output_shift[in_ch + 0]); + out_buff1 = arm_nn_requantize(out_buff1, output_mult[in_ch + 1], output_shift[in_ch + 1]); + out_buff2 = arm_nn_requantize(out_buff2, output_mult[in_ch + 2], output_shift[in_ch + 2]); + out_buff3 = arm_nn_requantize(out_buff3, output_mult[in_ch + 3], output_shift[in_ch + 3]); + + out_buff0 += output_offset; + out_buff1 += output_offset; + out_buff2 += output_offset; + out_buff3 += output_offset; + + out_buff0 = MIN(MAX(out_buff0, output_activation_min), output_activation_max); + out_buff1 = MIN(MAX(out_buff1, output_activation_min), output_activation_max); + out_buff2 = MIN(MAX(out_buff2, output_activation_min), output_activation_max); + out_buff3 = MIN(MAX(out_buff3, output_activation_min), output_activation_max); + + output[out_idx++] = (int8_t)out_buff0; + output[out_idx++] = (int8_t)out_buff1; + output[out_idx++] = (int8_t)out_buff2; + output[out_idx++] = (int8_t)out_buff3; + } + + // Leftover + for(; in_ch < input_ch; ++in_ch) + { + int32_t out_buff = bias[in_ch]; + + const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; + const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; + + for(int32_t ker_h = ker_h_start; ker_h < MIN(3, input_y - in_h); ++ker_h) + { + if(ker_w_start == 0) + { + out_buff += (*(input_ptr) + input_offset) * *(kernel_ptr); + } + + out_buff += (*(input_ptr + input_ch) + input_offset) * *(kernel_ptr + input_ch); + + if((input_x - in_w) >= 3) + { + out_buff += (*(input_ptr + (input_ch << 1)) + input_offset) * *(kernel_ptr + (input_ch << 1)); + } + + input_ptr += (input_ch * input_x); + kernel_ptr += (input_ch * 3); + } + + out_buff = arm_nn_requantize(out_buff, output_mult[in_ch], output_shift[in_ch]); + out_buff += output_offset; + out_buff = MIN(MAX(out_buff, output_activation_min), output_activation_max); + output[out_idx++] = (int8_t)out_buff; + } + } + } + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8.c new file mode 100755 index 00000000000..e6a13c19761 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8.c @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s8.c + * Description: s8 version of depthwise convolution. + * + * $Date: March 20, 2020 + * $Revision: V.1.1.1 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +static void depthwise_conv_s8_mult_4(const int8_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t ch_mult, + const int32_t kernel_x, + const int32_t kernel_y, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max) +{ + for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) + { + for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) + { + for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; ++in_ch, out_ch += ch_mult) + { + for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) + { + int32_t out_buff[4]; + + out_buff[0] = bias[out_ch + 0 + mult_tile]; + out_buff[1] = bias[out_ch + 1 + mult_tile]; + out_buff[2] = bias[out_ch + 2 + mult_tile]; + out_buff[3] = bias[out_ch + 3 + mult_tile]; + + for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); ++ker_h) + { + int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; + int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; + + for (int32_t ker_w = ker_w_start; ker_w < MIN(kernel_x, input_x - in_w); ++ker_w, ker_idx += output_ch) + { + int32_t in_val = input[in_idx + ker_w * input_ch] + input_offset; + out_buff[0] += in_val * kernel[ker_idx + 0 + mult_tile]; + out_buff[1] += in_val * kernel[ker_idx + 1 + mult_tile]; + out_buff[2] += in_val * kernel[ker_idx + 2 + mult_tile]; + out_buff[3] += in_val * kernel[ker_idx + 3 + mult_tile]; + } + } +#if defined(ARM_MATH_MVEI) + (void)out_idx; + int32x4_t res = vldrwq_s32(out_buff); + res = arm_requantize_mve_32x4(res, vldrwq_s32(&output_mult[out_ch + mult_tile]), vldrwq_s32(&output_shift[out_ch + mult_tile])); + res = vaddq_n_s32(res, output_offset); + + res = vmaxq_s32(res, vdupq_n_s32(output_activation_min)); + res = vminq_s32(res, vdupq_n_s32(output_activation_max)); + vstrbq_s32(output, res); + output += 4; +#else + out_buff[0] = 
arm_nn_requantize(out_buff[0], output_mult[out_ch + 0 + mult_tile], output_shift[out_ch + 0 + mult_tile]); + out_buff[1] = arm_nn_requantize(out_buff[1], output_mult[out_ch + 1 + mult_tile], output_shift[out_ch + 1 + mult_tile]); + out_buff[2] = arm_nn_requantize(out_buff[2], output_mult[out_ch + 2 + mult_tile], output_shift[out_ch + 2 + mult_tile]); + out_buff[3] = arm_nn_requantize(out_buff[3], output_mult[out_ch + 3 + mult_tile], output_shift[out_ch + 3 + mult_tile]); + + out_buff[0] += output_offset; + out_buff[1] += output_offset; + out_buff[2] += output_offset; + out_buff[3] += output_offset; + + out_buff[0] = MIN(MAX(out_buff[0], output_activation_min), output_activation_max); + out_buff[1] = MIN(MAX(out_buff[1], output_activation_min), output_activation_max); + out_buff[2] = MIN(MAX(out_buff[2], output_activation_min), output_activation_max); + out_buff[3] = MIN(MAX(out_buff[3], output_activation_min), output_activation_max); + + output[out_idx++] = (int8_t)out_buff[0]; + output[out_idx++] = (int8_t)out_buff[1]; + output[out_idx++] = (int8_t)out_buff[2]; + output[out_idx++] = (int8_t)out_buff[3]; + +#endif + } + } + } + } +} + +static void depthwise_conv_s8_generic(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t ch_mult, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max) +{ + (void)output_ch; + int i_out = 0; + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + int32_t acc_0; + /* Condition for kernel start dimension: (base_idx_ + ker__start) >= 0 */ + const int ker_y_start = MAX(0, -base_idx_y); + const int ker_x_start = MAX(0, -base_idx_x); + /* Condition for kernel end dimension: (base_idx_ + ker__end) < input_ */ + const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); + const int ker_x_end = MIN(kernel_x, input_x - base_idx_x); + acc_0 = bias[idx_out_ch]; + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + i_ker_y; + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t idx_x = base_idx_x + i_ker_x; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + + acc_0 += (input[idx_0] + input_offset) * kernel[ker_idx_0]; + } + } + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], output_shift[idx_out_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); + + output[i_out++] = acc_0; + } + } + } + } +} + + /* + * Basic s8 depthwise convolution function. 
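+ * When the channel multiplier is a multiple of four, the work is routed to the
+ * unrolled depthwise_conv_s8_mult_4() helper; all other multipliers fall back to
+ * depthwise_conv_s8_generic().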
+ * + * Refer header file for details. + * Optimization using DSP extension is not available for the generic case where channel multiplier is > 1. + * + */ +arm_status arm_depthwise_conv_s8(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t ch_mult, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y, + q15_t *buffer_a) +{ + (void)dilation_x; + (void)dilation_y; + (void)buffer_a; + + if(ch_mult % 4 == 0) + { + depthwise_conv_s8_mult_4(input, input_x, input_y, input_ch, kernel, output_ch, ch_mult, kernel_x, kernel_y, pad_x, pad_y, stride_x, stride_y, bias, + output, output_shift, output_mult, output_x, output_y, output_offset, input_offset, output_activation_min, output_activation_max); + } + else + { + depthwise_conv_s8_generic(input, input_x, input_y, input_ch, kernel, output_ch, ch_mult, kernel_x, kernel_y, pad_x, pad_y, stride_x, stride_y, bias, + output, output_shift, output_mult, output_x, output_y, output_offset, input_offset, output_activation_min, output_activation_max); + } + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c new file mode 100755 index 00000000000..fc7dbdf00cb --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c @@ -0,0 +1,440 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s8_opt.c + * Description: Optimized s8 depthwise separable convolution function for + * channel multiplier of 1. 
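+ *                 Requires input_ch == output_ch; cores without MVE or the DSP
+ *                 extension fall back to the generic arm_depthwise_conv_s8().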
+ * + * $Date: March 12, 2020 + * $Revision: V.1.5.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnsupportfunctions.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel + * + * Refer prototype header file for details. + * + */ + +arm_status arm_depthwise_conv_s8_opt(const q7_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const q7_t *kernel, + const uint16_t output_ch, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int32_t *bias, + q7_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y, + q15_t *buffer_a) +{ + /* Check input constraints input_ch == output_ch */ + if (input_ch != output_ch) + { + return ARM_MATH_SIZE_MISMATCH; + } +#ifdef ARM_MATH_MVEI + (void)dilation_x; + (void)dilation_y; + + /* Generate two columns from the input tensor */ + q7_t *lhs_buffer = (q7_t *)buffer_a; + q7_t *out = output; + int padded = 0; + int buffer_count = 0; + const int32_t kernel_size = kernel_x * kernel_y; + + /* This part implements the im2col function */ + for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + { + for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + { + for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) + { + for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + arm_memset_q7(lhs_buffer, (int8_t)-input_offset, input_ch); + padded = 1; + } + else + { + arm_memcpy_q7(lhs_buffer, input + (i_ker_y * input_x + i_ker_x) * input_ch, input_ch); + } + lhs_buffer += input_ch; + } + } + buffer_count++; + + if (buffer_count == 4) + { + lhs_buffer = (q7_t *)buffer_a; + if (padded == 0) + { + out = arm_nn_depthwise_conv_nt_t_s8(lhs_buffer, + kernel, + input_offset, + input_ch, + output_shift, + output_mult, + output_offset, + output_activation_min, + output_activation_max, + kernel_size, + bias, + out); + } + else + { + out = arm_nn_depthwise_conv_nt_t_padded_s8(lhs_buffer, + kernel, + input_offset, + input_ch, + output_shift, + output_mult, + output_offset, + output_activation_min, + output_activation_max, + kernel_size, + bias, + out); + padded = 0; + } + buffer_count = 0; + } + } + } + + /* Handle left over buffers */ + lhs_buffer = (q7_t *)buffer_a; + + for (int i_buf = 0; i_buf < buffer_count; i_buf++) + { + int32_t loop_count = (input_ch + 3) / 4; + + uint32_t num_ch_to_process = input_ch; + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, i_loop_cnt++) + { + const int8_t *col_0 = lhs_buffer + (kernel_size * input_ch * i_buf) + offset; + const int8_t *row_0 = kernel + offset; + int32x4_t out_0 = vldrwq_s32(&bias[offset]); + + for (int i_ker = 0; i_ker < kernel_size; i_ker++) + { + const int32x4_t ker_0 = vldrbq_s32(row_0); + + 
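+                    /* vldrbq_s32 widens four q7 input bytes to 32-bit lanes; the input
+                       offset is added per lane before the multiply-accumulate against
+                       the widened kernel lanes. */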
int32x4_t ip_0 = vldrbq_s32(col_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); + + col_0 += input_ch; + row_0 += input_ch; + } + + const int32x4_t mult = vldrwq_s32(&output_mult[offset]); + const int32x4_t shift = vldrwq_s32(&output_shift[offset]); + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, output_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); + mve_pred16_t p = vctp32q(num_ch_to_process); + vstrbq_p_s32(out, out_0, p); + + out += 4; + } + + const int tail_ch = input_ch & 0x3; + if (tail_ch != 0) + { + out -= (4 - tail_ch); + } + } + +#elif defined(ARM_MATH_DSP) + /* Run the following code in cores using DSP extension */ + (void)dilation_x; + (void)dilation_y; + q15_t *const col_buffer_start = buffer_a; + q15_t *col_buffer = col_buffer_start; + const int32_t *const bias_start_pos = bias; + const q31_t *const out_mult_start_pos = output_mult; + const q31_t *const out_shift_start_pos = output_shift; + uint16_t row_count; + uint16_t row_shift; + + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + + /* Out of bounds is only considered for the y axis as it provides a contiguous zero'ing opportunity than along + the x axis */ + const int ker_y_start = MAX(0, -base_idx_y); + /* Condition for kernel end dimension: (base_idx_y + ker_y_end) < input_y */ + const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); + + int32_t index = 0; + if (ker_y_start != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * ker_y_start * sizeof(q15_t)); + index += (kernel_x * input_ch) * ker_y_start; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + i_ker_y; + + for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t idx_x = base_idx_x + i_ker_x; + if (idx_x < 0 || idx_x >= input_x) + { + memset(&col_buffer[index], 0, input_ch * sizeof(q15_t)); + } + else + { + arm_q7_to_q15_with_offset((q7_t *)input + (idx_y * input_x + idx_x) * input_ch, &col_buffer[index], input_ch, input_offset); + } + index += input_ch; + } + } + + const int diff = kernel_y - ker_y_end; + if (diff != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * diff * sizeof(q15_t)); + } + + row_count = output_ch / 4; + row_shift = 0; + bias = bias_start_pos; + output_mult = out_mult_start_pos; + output_shift = out_shift_start_pos; + + while (row_count) + { + q31_t sum = *bias++; + q31_t sum_2 = *bias++; + q31_t sum_3 = *bias++; + q31_t sum_4 = *bias++; + + uint16_t col_count = (kernel_x * kernel_y) / 2; + q15_t *col_pos = col_buffer_start + row_shift; + const q7_t *row_pos = kernel + row_shift; + row_shift += 4; + + while (col_count) + { + /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to + use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ + /* Note: variable names can be improved here to align with rows and columns. 
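+                    ip_* hold eight kernel weights (four consecutive output channels at two
+                    adjacent kernel positions) and op_* hold the matching q15 column entries;
+                    __SXTB16/__PKHBT/__PKHTB re-pack them so that each __SMLAD accumulates
+                    two products for one of the four partial sums.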
*/ + q31_t ip_a1, ip_a2, ip_b1, ip_b2, op_a, op_b, op_c; + /* Read 4 weights */ + ip_b1 = arm_nn_read_q7x4(row_pos); + ip_a1 = arm_nn_read_q7x4(row_pos + input_ch); + op_a = arm_nn_read_q15x2(col_pos); + op_b = arm_nn_read_q15x2(col_pos + input_ch); + + ip_a2 = __SXTB16(ip_b1); + ip_b1 = __SXTB16(__ROR(ip_b1, 8)); + + ip_b2 = __SXTB16(ip_a1); + ip_a1 = __SXTB16(__ROR(ip_a1, 8)); + + op_c = __PKHBT(op_b, op_a, 16); + op_a = __PKHTB(op_b, op_a, 16); + op_b = __PKHBT(ip_b2, ip_a2, 16); + sum = __SMLAD(op_c, op_b, sum); + + op_b = __PKHBT(ip_b1, ip_a1, 16); + sum_2 = __SMLAD(op_a, op_b, sum_2); + + op_a = arm_nn_read_q15x2(col_pos + 2); + op_b = arm_nn_read_q15x2(col_pos + input_ch + 2); + + op_c = __PKHBT(op_b, op_a, 16); + op_a = __PKHTB(op_b, op_a, 16); + op_b = __PKHTB(ip_a2, ip_b2, 16); + sum_3 = __SMLAD(op_c, op_b, sum_3); + + op_b = __PKHTB(ip_a1, ip_b1, 16); + sum_4 = __SMLAD(op_a, op_b, sum_4); + + row_pos += input_ch << 1; + col_pos += input_ch << 1; + col_count--; + } + + col_count = (kernel_x * kernel_y) & 0x1; + while (col_count) + { + sum += row_pos[0] * col_pos[0]; + sum_2 += row_pos[1] * col_pos[1]; + sum_3 += row_pos[2] * col_pos[2]; + sum_4 += row_pos[3] * col_pos[3]; + + row_pos += input_ch; + col_pos += input_ch; + + col_count--; + } + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (q7_t)sum; + + sum_2 = arm_nn_requantize(sum_2, *output_mult++, *output_shift++); + sum_2 += output_offset; + sum_2 = MAX(sum_2, output_activation_min); + sum_2 = MIN(sum_2, output_activation_max); + *output++ = (q7_t)sum_2; + sum_3 = arm_nn_requantize(sum_3, *output_mult++, *output_shift++); + sum_3 += output_offset; + sum_3 = MAX(sum_3, output_activation_min); + sum_3 = MIN(sum_3, output_activation_max); + *output++ = (q7_t)sum_3; + + sum_4 = arm_nn_requantize(sum_4, *output_mult++, *output_shift++); + sum_4 += output_offset; + sum_4 = MAX(sum_4, output_activation_min); + sum_4 = MIN(sum_4, output_activation_max); + *output++ = (q7_t)sum_4; + + row_count--; + } + + row_count = output_ch & 0x3; + while (row_count) + { + q15_t *col_pos = col_buffer_start + row_shift; + const q7_t *row_pos = kernel + row_shift; + q31_t sum = *bias++; + const uint16_t col_count = (kernel_x * kernel_y); + row_shift += 1; + + for (int i = 0; i < col_count; i++) + { + sum += row_pos[i * input_ch] * col_pos[i * input_ch]; + } + sum = arm_nn_requantize(sum, *output_mult++, *output_shift++); + sum += output_offset; + sum = MAX(sum, output_activation_min); + sum = MIN(sum, output_activation_max); + *output++ = (q7_t)sum; + + row_count--; + } + + // clear counter and pointers + col_buffer = col_buffer_start; + } + } + +#else + (void)buffer_a; + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + return arm_depthwise_conv_s8(input, + input_x, + input_y, + input_ch, + kernel, + output_ch, + 1, + kernel_x, + kernel_y, + pad_x, + pad_y, + stride_x, + stride_y, + bias, + output, + output_shift, + output_mult, + output_x, + output_y, + output_offset, + input_offset, + output_activation_min, + output_activation_max, + dilation_x, + dilation_y, + NULL); +#endif /* ARM_MATH_MVEI | ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; +} + +int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const uint16_t input_ch, + const uint16_t kernel_x, + const uint16_t kernel_y) +{ +#if defined(ARM_MATH_MVEI) + /* The + 4 accounts for out of bounds read of 
the lhs buffers in the *_nt_t_* functions. */ + return (2 * input_ch * kernel_x * kernel_y) * sizeof(int16_t) + 4; +#elif defined(ARM_MATH_DSP) + return (input_ch * kernel_x * kernel_y) * sizeof(int16_t); +#else + (void)input_ch; + (void)kernel_x; + (void)kernel_y; + return 0; +#endif +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c new file mode 100755 index 00000000000..b6cd573d316 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_u8_basic_ver1.c + * Description: u8 depthwise convolution function + * + * $Date: June, 2019 + * $Revision: V.0.8.0 + * + * Target : Cortex-M cores with DSP extension + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" +#include +#include + +#define DILATION_X (1) +#define DILATION_Y (1) + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/** + * @brief uint8 depthwise convolution function with asymmetric quantization for even number of channel multiplier + * and input channels. Unless specified otherwise, arguments are mandatory. Both square and non-square inputs + * are accepted. + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_ch Channels in input tensor + * @param[in] kernel Pointer to kernel weights + * @param[in] kernel_x Width of kernel + * @param[in] kernel_y Height of kernel + * @param[in] ch_mult Number of channel multiplier + * @param[in] pad_x Padding sizes x + * @param[in] pad_y Padding sizes y + * @param[in] stride_x Convolution stride along the width + * @param[in] stride_y Convolution stride along the height + * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. + * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. + * @param[in] bias Pointer to optional bias values. 
If no bias is + * availble, NULL is expected + * @param[in] input_offset Input tensor zero offset + * @param[in] filter_offset Kernel tensor zero offset + * @param[in] output_offset Output tensor zero offset + * @param[in,out] output Pointer to output tensor + * @param[in] output_x Width of output tensor + * @param[in] output_y Height of output tensor + * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255} + * @param[in] output_activation_max Minimum value to clamp the output to. Range : {0, 255} + * @param[in] out_shift Amount of right-shift for output + * @param[in] out_mult Output multiplier for requantization + * @return The function returns one of the following + * ARM_MATH_SIZE_MISMATCH - Not supported dimension of tensors + * ARM_MATH_SUCCESS - Successful operation + * ARM_MATH_ARGUMENT_ERROR - Implementation not available + * + * Input constraints + * ch_mult is multiple of 2 + * kernel_x is multiple of 2 + * + */ + +arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint8_t *kernel, + const uint16_t kernel_x, + const uint16_t kernel_y, + const int16_t ch_mult, + const int16_t pad_x, + const int16_t pad_y, + const int16_t stride_x, + const int16_t stride_y, + const int16_t dilation_x, + const int16_t dilation_y, + const int32_t *bias, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t output_offset, + uint8_t *output, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t out_shift, + const int32_t out_mult) +{ + arm_status status = ARM_MATH_SUCCESS; + #if defined (ARM_MATH_DSP) + int i_out = 0; + (void)dilation_x; + (void)dilation_y; + + const int32_t input_offset_pkd = (input_offset & 0xFFFF) | (input_offset & 0xFFFF) << 16; + const int32_t kernel_offset_pkd = (filter_offset & 0xFFFF) | (filter_offset & 0xFFFF) << 16; + + if (0 != ch_mult % 2 || 0 != kernel_x % 2) + { + return ARM_MATH_SIZE_MISMATCH; + } + + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult += 2) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + + int32_t acc_0 = 0; + int32_t acc_1 = 0; + if (NULL != bias) + { + acc_0 = bias[idx_out_ch]; + acc_1 = bias[idx_out_ch + 1]; + } + + for (int i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) + { + const int32_t idx_y = base_idx_y + DILATION_Y * i_ker_y; + const int32_t y_in_range = (idx_y >= 0) && (idx_y < input_y); + + for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x += 2) + { + if (1 == y_in_range) + { + const int32_t idx_x = base_idx_x + DILATION_X * i_ker_x; + const int32_t idx_x1 = base_idx_x + DILATION_X * (i_ker_x + 1); + /* Range check for first input */ + if (idx_x >= 0 && idx_x < input_x) + { + const int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + + const int32_t ker_idx_0 = + (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + const int32_t ker_idx_1 = ker_idx_0 + input_ch * ch_mult; + + int32_t input_pkd = input[idx_0] | (input[idx_0 + input_ch] << 16); + int32_t kernel_pkd = kernel[ker_idx_0] | (kernel[ker_idx_1] << 16); + + input_pkd = 
__SADD16(input_pkd, input_offset_pkd); + kernel_pkd = __SADD16(kernel_pkd, kernel_offset_pkd); + /* Range check for second input */ + if (idx_x1 >= input_x) + { + input_pkd &= 0xFFFF; + } + acc_0 = __SMLAD(input_pkd, kernel_pkd, acc_0); + + kernel_pkd = kernel[ker_idx_0 + 1] | (kernel[ker_idx_1 + 1] << 16); + kernel_pkd = __SADD16(kernel_pkd, kernel_offset_pkd); + acc_1 = __SMLAD(input_pkd, kernel_pkd, acc_1); + } + } + } + } + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_divide_by_power_of_two(arm_nn_sat_doubling_high_mult( + acc_0 * (1 << LEFT_SHIFT(out_shift)), out_mult), + RIGHT_SHIFT(out_shift)); + + acc_0 += output_offset; + + if (output_activation_min > acc_0) + { + acc_0 = output_activation_min; + } + + if (acc_0 > output_activation_max) + { + acc_0 = output_activation_max; + } + output[i_out++] = acc_0; + + /* Requantize and clamp output to provided range */ + acc_1 = arm_nn_divide_by_power_of_two(arm_nn_sat_doubling_high_mult( + acc_1 * (1 << LEFT_SHIFT(out_shift)), out_mult), + RIGHT_SHIFT(out_shift)); + acc_1 += output_offset; + + if (output_activation_min > acc_1) + { + acc_1 = output_activation_min; + } + + if (acc_1 > output_activation_max) + { + acc_1 = output_activation_max; + } + output[i_out++] = acc_1; + } + } + } + } +#else + /* No available implementation. */ + status = ARM_MATH_ARGUMENT_ERROR; +#endif + return status; +} + +/** + * @} end of NNConv group + */ + + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c new file mode 100755 index 00000000000..a7eaf93f271 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c @@ -0,0 +1,418 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_separable_conv_HWC_q7.c + * Description: Q7 depthwise separable convolution function + * + * $Date: 17. 
January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/** + * @brief Q7 depthwise separable convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * Input dimension constraints: + * + * ch_im_in equals ch_im_out + * + * Implementation: + * There are 3 nested loop here: + * Inner loop: calculate each output value with MAC instruction over an accumulator + * Mid loop: loop over different output channel + * Outer loop: loop over different output (x, y) + */ + +arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out, + q15_t * bufferA, + q7_t * bufferB) +{ + (void)bufferB; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_out_y, i_out_x; + int16_t i_ker_y, i_ker_x; + q7_t *colBuffer = (q7_t *) bufferA; + q7_t *pBuffer = colBuffer; + const q7_t *pBias = bias; + q7_t *pOut = Im_out; + uint16_t rowCnt; + uint16_t row_shift; + + /* do some checking here, basically ch_im_in == ch_im_out */ + if (ch_im_in != ch_im_out) + { + return ARM_MATH_SIZE_MISMATCH; + } + + for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + /* we first do im2col here */ + for (i_ker_y = i_out_y * stride - padding; i_ker_y < i_out_y * stride - padding + dim_kernel; i_ker_y++) + { + for (i_ker_x = i_out_x * stride - padding; i_ker_x < i_out_x * stride - padding + dim_kernel; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) + { + /* arm_fill_q7(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, ch_im_in); + } else + { + /* arm_copy_q7((q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ + memcpy(pBuffer, (q7_t *) Im_in + (i_ker_y * dim_im_in + i_ker_x) * ch_im_in, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + /* we will do the computation here for each channel */ + rowCnt = ch_im_out >> 2; + row_shift = 0; + pBias = bias; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = 
((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = (dim_kernel * dim_kernel) >> 1; + q7_t *pB = colBuffer + row_shift; + const q7_t *pA = wt + row_shift; + row_shift += 4; + +#ifdef USE_INTRINSIC + +#ifndef ARM_MATH_BIG_ENDIAN + + while (colCnt) + { + q31_t inA1, inA2, inB1, inB2, opA, opB; + + inB1 = arm_nn_read_q7x4(pB); + pB += ch_im_in; + opB = arm_nn_read_q7x4(pB); + pB += ch_im_in; + inB2 = __PKHTB(opB, inB1, 16); + inB1 = __PKHBT(inB1, opB, 16); + inA1 = arm_nn_read_q7x4(pA); + pA += ch_im_in; + opB = arm_nn_read_q7x4(pA); + pA += ch_im_in; + inA2 = __PKHTB(opB, inA1, 16); + inA1 = __PKHBT(inA1, opB, 16); + opA = __SXTB16(inA1); + opB = __SXTB16(inB1); + sum = __SMLAD(opA, opB, sum); + opA = __SXTB16(__ROR(inA1, 8)); + opB = __SXTB16(__ROR(inB1, 8)); + sum2 = __SMLAD(opA, opB, sum2); + opA = __SXTB16(inA2); + opB = __SXTB16(inB2); + sum3 = __SMLAD(opA, opB, sum3); + opA = __SXTB16(__ROR(inA2, 8)); + opB = __SXTB16(__ROR(inB2, 8)); + sum4 = __SMLAD(opA, opB, sum4); + colCnt--; + } +#else + + while (colCnt) + { + q31_t inA1, inA2, inB1, inB2, opA, opB; + + inB1 = arm_nn_read_q7x4(pB); + pB += ch_im_in; + opB = arm_nn_read_q7x4(pB); + pB += ch_im_in; + inB2 = __PKHBT(opB, inB1, 16); + inB1 = __PKHTB(inB1, opB, 16); + inA1 = arm_nn_read_q7x4(pA); + pA += ch_im_in; + opB = arm_nn_read_q7x4(pA); + pA += ch_im_in; + inA2 = __PKHBT(opB, inA1, 16); + inA1 = __PKHTB(inA1, opB, 16); + opA = __SXTB16(inA1); + opB = __SXTB16(inB1); + sum2 = __SMLAD(opA, opB, sum2); + opA = __SXTB16(__ROR(inA1, 8)); + opB = __SXTB16(__ROR(inB1, 8)); + sum = __SMLAD(opA, opB, sum); + opA = __SXTB16(inA2); + opB = __SXTB16(inB2); + sum4 = __SMLAD(opA, opB, sum4); + opA = __SXTB16(__ROR(inA2, 8)); + opB = __SXTB16(__ROR(inB2, 8)); + sum3 = __SMLAD(opA, opB, sum3); + colCnt--; + } + +#endif /* ARM_MATH_BIG_ENDIAN */ + +#else + +#ifndef ARM_MATH_BIG_ENDIAN + /* + * r0 r1 r2 r3 r4 r5 + * inA1, inA2, inB1, inB2, opA, opB + */ + + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r2, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "ldr.w r5, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "pkhtb r3, r5, r2, ASR #16\n" + "pkhbt r2, r2, r5, LSL #16\n" + "ldr.w r0, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "ldr.w r5, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "pkhtb r1, r5, r0, ASR #16\n" + "pkhbt r0, r0, r5, LSL #16\n" + "sxtb16 r4, r0\n" + "sxtb16 r5, r2\n" + "smlad %[sum], r4, r5, %[sum]\n" + "mov.w r4, r0, ror #8\n" + "mov.w r5, r2, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum2], r4, r5, %[sum2]\n" + "sxtb16 r4, r1\n" + "sxtb16 r5, r3\n" + "smlad %[sum3], r4, r5, %[sum3]\n" + "mov.w r4, r1, ror #8\n" + "mov.w r5, r3, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum4], r4, r5, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] + "+r"(sum),[sum2] "+r"(sum2), + [sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB), + [pA] "+r"(pA):[colCnt] + "r"(colCnt),[ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); +#else + /* + * r0 r1 r2 r3 r4 r5 + * inA1, inA2, inB1, inB2, opA, opB + */ + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r2, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "ldr.w r5, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "pkhbt r3, r5, r2, LSL #16\n" + "pkhtb r2, r2, r5, ASR #16\n" + "ldr.w r0, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" 
+ "ldr.w r5, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "pkhbt r1, r5, r0, LSL #16\n" + "pkhtb r0, r0, r5, ASR #16\n" + "sxtb16 r4, r0\n" + "sxtb16 r5, r2\n" + "smlad %[sum2], r4, r5, %[sum2]\n" + "mov.w r4, r0, ror #8\n" + "mov.w r5, r2, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum], r4, r5, %[sum]\n" + "sxtb16 r4, r1\n" + "sxtb16 r5, r3\n" + "smlad %[sum4], r4, r5, %[sum4]\n" + "mov.w r4, r1, ror #8\n" + "mov.w r5, r3, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum3], r4, r5, %[sum3]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] + "+r"(sum),[sum2] "+r"(sum2), + [sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB), + [pA] "+r"(pA):[colCnt] + "r"(colCnt),[ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); + +#endif /* ARM_MATH_BIG_ENDIAN */ + +#endif /* USE_INTRINSIC */ + + colCnt = (dim_kernel * dim_kernel) & 0x1; + while (colCnt) + { + union arm_nnword inA, inB; + inA.word = arm_nn_read_q7x4(pA); + pA += ch_im_in; + inB.word = arm_nn_read_q7x4(pB); + pB += ch_im_in; + sum += inA.bytes[0] * inB.bytes[0]; + sum2 += inA.bytes[1] * inB.bytes[1]; + sum3 += inA.bytes[2] * inB.bytes[2]; + sum4 += inA.bytes[3] * inB.bytes[3]; + colCnt--; + } + + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum2 >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum4 >> out_shift), 8); + + rowCnt--; + } + + rowCnt = ch_im_out & 0x3; + while (rowCnt) + { + q7_t *pB = colBuffer + row_shift; + const q7_t *pA = wt + row_shift; + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = (dim_kernel * dim_kernel); + + row_shift += 1; + + while (colCnt) + { + q7_t A1 = *pA; + q7_t B1 = *pB; + pA += ch_im_in; + pB += ch_im_in; + sum += A1 * B1; + + colCnt--; + } + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + rowCnt--; + } + + /* clear counter and pointers */ + pBuffer = colBuffer; + } + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + int i_out_y, i_out_x, i_ch_out, i_ker_x, i_ker_y; + int conv_out; + + /* do some checking here, basically ch_im_in == ch_im_out */ + if (ch_im_in != ch_im_out) + { + return ARM_MATH_SIZE_MISMATCH; + } + + for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out; i_out_x++) + { + for (i_ch_out = 0; i_ch_out < ch_im_out; i_ch_out++) + { + // for each output + conv_out = ((q31_t)(bias[i_ch_out]) << bias_shift) + NN_ROUND(out_shift); + for (i_ker_y = 0; i_ker_y < dim_kernel; i_ker_y++) + { + for (i_ker_x = 0; i_ker_x < dim_kernel; i_ker_x++) + { + int in_row = stride * i_out_y + i_ker_y - padding; + int in_col = stride * i_out_x + i_ker_x - padding; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in && in_col < dim_im_in) + { + conv_out += + Im_in[(in_row * + dim_im_in + + in_col) * + ch_im_in + + i_ch_out] * wt[(i_ker_y * dim_kernel + i_ker_x) * ch_im_out + i_ch_out]; + } + } + } + Im_out[(i_out_y * dim_im_out + + i_out_x) * ch_im_out + i_ch_out] = (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return ARM_MATH_SUCCESS; + +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c new file mode 100755 index 00000000000..53143da3e4b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c @@ -0,0 +1,413 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_separable_conv_HWC_q7_nonsquare.c + * Description: Q7 depthwise separable convolution function (non-square shape) + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/** + * @brief Q7 depthwise separable convolution function (non-square shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimention x + * @param[in] dim_im_in_y input tensor dimention y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding sizes x + * @param[in] padding_y padding sizes y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
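+ *
+ * A minimal calling sketch; the sizes, shift values and buffer dimensioning below are
+ * illustrative assumptions (bufferA follows the bufferA size note documented for the
+ * square-kernel variant), not part of the API contract:
+ * @code
+ *     #define CH       8
+ *     #define DIM_IN  16
+ *     static q7_t  in[DIM_IN * DIM_IN * CH];
+ *     static q7_t  wt[3 * 3 * CH];
+ *     static q7_t  bias[CH];
+ *     static q7_t  out[DIM_IN * DIM_IN * CH];
+ *     static q15_t bufferA[CH * 3 * 3];
+ *
+ *     arm_status s = arm_depthwise_separable_conv_HWC_q7_nonsquare(
+ *         in, DIM_IN, DIM_IN, CH, wt, CH, 3, 3, 1, 1, 1, 1,
+ *         bias, 0, 7, out, DIM_IN, DIM_IN, bufferA, NULL);
+ * @endcode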
+ * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is equal to ch_im_out + * + */ + +arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t * Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t * wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t * bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t * Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t * bufferA, + q7_t * bufferB) +{ + + (void)bufferB; + +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + +/* + * Implementation: + * There are 3 nested loop here: + * Inner loop: calculate each output value with MAC instruction over an accumulator + * Mid loop: loop over different output channel + * Outer loop: loop over different output (x, y) + * + */ + + int16_t i_out_y, i_out_x; + int16_t i_ker_y, i_ker_x; + q7_t *colBuffer = (q7_t *) bufferA; + q7_t *pBuffer = colBuffer; + const q7_t *pBias = bias; + q7_t *pOut = Im_out; + uint16_t rowCnt; + uint16_t row_shift; + + /* do some checking here, basically ch_im_in == ch_im_out */ + if (ch_im_in != ch_im_out) + { + return ARM_MATH_SIZE_MISMATCH; + } + + for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + /* we first do im2col here */ + for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; + i_ker_y++) + { + for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; + i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x) + { + /* arm_fill_q7(0, pBuffer, ch_im_in); */ + memset(pBuffer, 0, ch_im_in); + } else + { + /* arm_copy_q7((q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */ + memcpy(pBuffer, (q7_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, ch_im_in); + } + pBuffer += ch_im_in; + } + } + + /* we will do the computation here for each channel */ + rowCnt = ch_im_out >> 2; + row_shift = 0; + pBias = bias; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = (dim_kernel_x * dim_kernel_y) >> 1; + q7_t *pB = colBuffer + row_shift; + const q7_t *pA = wt + row_shift; + row_shift += 4; + +#ifdef USE_INTRINSIC + +#ifndef ARM_MATH_BIG_ENDIAN + + while (colCnt) + { + q31_t inA1, inA2, inB1, inB2, opA, opB; + + inB1 = arm_nn_read_q7x4(pB); + pB += ch_im_in; + opB = arm_nn_read_q7x4(pB); + pB += ch_im_in; + inB2 = __PKHTB(opB, inB1, 16); + inB1 = __PKHBT(inB1, opB, 16); + inA1 = arm_nn_read_q7x4(pA); + pA += ch_im_in; + opB = arm_nn_read_q7x4(pA); + pA += ch_im_in; + inA2 = __PKHTB(opB, inA1, 16); + inA1 = __PKHBT(inA1, opB, 16); + opA = __SXTB16(inA1); + opB = __SXTB16(inB1); + sum = __SMLAD(opA, opB, sum); + opA = __SXTB16(__ROR(inA1, 8)); + opB = __SXTB16(__ROR(inB1, 8)); + sum2 = __SMLAD(opA, opB, sum2); + opA = __SXTB16(inA2); + opB = __SXTB16(inB2); + sum3 = __SMLAD(opA, opB, sum3); + opA = 
__SXTB16(__ROR(inA2, 8)); + opB = __SXTB16(__ROR(inB2, 8)); + sum4 = __SMLAD(opA, opB, sum4); + colCnt--; + } +#else + + while (colCnt) + { + q31_t inA1, inA2, inB1, inB2, opA, opB; + + inB1 = arm_nn_read_q7x4(pB); + pB += ch_im_in; + opB = arm_nn_read_q7x4(pB); + pB += ch_im_in; + inB2 = __PKHBT(opB, inB1, 16); + inB1 = __PKHTB(inB1, opB, 16); + inA1 = arm_nn_read_q7x4(pA); + pA += ch_im_in; + opB = arm_nn_read_q7x4(pA); + pA += ch_im_in; + inA2 = __PKHBT(opB, inA1, 16); + inA1 = __PKHTB(inA1, opB, 16); + opA = __SXTB16(inA1); + opB = __SXTB16(inB1); + sum2 = __SMLAD(opA, opB, sum2); + opA = __SXTB16(__ROR(inA1, 8)); + opB = __SXTB16(__ROR(inB1, 8)); + sum = __SMLAD(opA, opB, sum); + opA = __SXTB16(inA2); + opB = __SXTB16(inB2); + sum4 = __SMLAD(opA, opB, sum4); + opA = __SXTB16(__ROR(inA2, 8)); + opB = __SXTB16(__ROR(inB2, 8)); + sum3 = __SMLAD(opA, opB, sum3); + colCnt--; + } + +#endif /* ARM_MATH_BIG_ENDIAN */ + +#else + +#ifndef ARM_MATH_BIG_ENDIAN + // r0 r1 r2 r3 r4 r5 + // inA1, inA2, inB1, inB2, opA, opB + asm volatile ("COL_LOOP:\n" + "ldr.w r2, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "ldr.w r5, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "pkhtb r3, r5, r2, ASR #16\n" + "pkhbt r2, r2, r5, LSL #16\n" + "ldr.w r0, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "ldr.w r5, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "pkhtb r1, r5, r0, ASR #16\n" + "pkhbt r0, r0, r5, LSL #16\n" + "sxtb16 r4, r0\n" + "sxtb16 r5, r2\n" + "smlad %[sum], r4, r5, %[sum]\n" + "mov.w r4, r0, ror #8\n" + "mov.w r5, r2, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum2], r4, r5, %[sum2]\n" + "sxtb16 r4, r1\n" + "sxtb16 r5, r3\n" + "smlad %[sum3], r4, r5, %[sum3]\n" + "mov.w r4, r1, ror #8\n" + "mov.w r5, r3, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum4], r4, r5, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP\n":[sum] "+r"(sum),[sum2] "+r"(sum2),[sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt), + [ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); +#else + // r0 r1 r2 r3 r4 r5 + // inA1, inA2, inB1, inB2, opA, opB + asm volatile ("COL_LOOP:\n" + "ldr.w r2, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "ldr.w r5, [%[pB], #0]\n" + "add.w %[pB], %[pB], %[ch_im_in]\n" + "pkhbt r3, r5, r2, LSL #16\n" + "pkhtb r2, r2, r5, ASR #16\n" + "ldr.w r0, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "ldr.w r5, [%[pA], #0]\n" + "add.w %[pA], %[pA], %[ch_im_in]\n" + "pkhbt r1, r5, r0, LSL #16\n" + "pkhtb r0, r0, r5, ASR #16\n" + "sxtb16 r4, r0\n" + "sxtb16 r5, r2\n" + "smlad %[sum2], r4, r5, %[sum2]\n" + "mov.w r4, r0, ror #8\n" + "mov.w r5, r2, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum], r4, r5, %[sum]\n" + "sxtb16 r4, r1\n" + "sxtb16 r5, r3\n" + "smlad %[sum4], r4, r5, %[sum4]\n" + "mov.w r4, r1, ror #8\n" + "mov.w r5, r3, ror #8\n" + "sxtb16 r4, r4\n" + "sxtb16 r5, r5\n" + "smlad %[sum3], r4, r5, %[sum3]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP\n":[sum] "+r"(sum),[sum2] "+r"(sum2),[sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt), + [ch_im_in] "r"(ch_im_in):"r0", "r1", "r2", "r3", "r4", "r5"); +#endif /*ARM_MATH_BIG_ENDIAN */ + +#endif /* USE_INTRINSIC */ + + colCnt = (dim_kernel_x * dim_kernel_y) & 0x1; + while (colCnt) + { + union arm_nnword inA, inB; + inA.word = arm_nn_read_q7x4(pA); + pA += ch_im_in; + inB.word = arm_nn_read_q7x4(pB); + pB += ch_im_in; + sum += inA.bytes[0] * inB.bytes[0]; + sum2 += 
inA.bytes[1] * inB.bytes[1]; + sum3 += inA.bytes[2] * inB.bytes[2]; + sum4 += inA.bytes[3] * inB.bytes[3]; + colCnt--; + } + + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum2 >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum4 >> out_shift), 8); + + rowCnt--; + } + + rowCnt = ch_im_out & 0x3; + while (rowCnt) + { + q7_t *pB = colBuffer + row_shift; + const q7_t *pA = wt + row_shift; + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = (dim_kernel_x * dim_kernel_y); + + row_shift += 1; + + while (colCnt) + { + q7_t A1 = *pA; + q7_t B1 = *pB; + pA += ch_im_in; + pB += ch_im_in; + sum += A1 * B1; + + colCnt--; + } + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + rowCnt--; + } + + // clear counter and pointers + pBuffer = colBuffer; + } + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + int i_out_y, i_out_x, i_ch_out; + int i_ker_y, i_ker_x; + + /* do some checking here, basically ch_im_in == ch_im_out */ + if (ch_im_in != ch_im_out) + { + return ARM_MATH_SIZE_MISMATCH; + } + + for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++) + { + for (i_ch_out = 0; i_ch_out < ch_im_out; i_ch_out++) + { + // for each output + int conv_out = ((q31_t)(bias[i_ch_out]) << bias_shift) + NN_ROUND(out_shift); + for (i_ker_y = 0; i_ker_y < dim_kernel_y; i_ker_y++) + { + for (i_ker_x = 0; i_ker_x < dim_kernel_x; i_ker_x++) + { + int in_row = stride_y * i_out_y + i_ker_y - padding_y; + int in_col = stride_x * i_out_x + i_ker_x - padding_x; + if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x) + { + conv_out += Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in + i_ch_out] * + wt[(i_ker_y * dim_kernel_x + i_ker_x) * ch_im_out + i_ch_out]; + } + } + } + Im_out[(i_out_y * dim_im_out_x + i_out_x) * ch_im_out + i_ch_out] = + (q7_t) __SSAT((conv_out >> out_shift), 8); + } + } + } + +#endif /* ARM_MATH_DSP */ + + + /* Return to application */ + return ARM_MATH_SUCCESS; + +} + +/** + * @} end of NNConv group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c new file mode 100755 index 00000000000..05dc7aa1e06 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c @@ -0,0 +1,219 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_s8_core.c + * Description: Depthwise convolution on im2col buffers. + * + * $Date: March 3, 2020 + * $Revision: V.1.0.2 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/* + * Depthwise conv on an im2col buffer where the input channel equals + * output channel. + * + * Refer header file for details. + * + */ + +q7_t *arm_nn_depthwise_conv_s8_core(const q7_t *row, + const q15_t *col, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t kernel_size, + const int32_t *const output_bias, + q7_t *out) +{ +#if defined(ARM_MATH_MVEI) + int32_t ch_per_loop = num_ch / 4; + + const int32_t *bias = output_bias; + int8_t *out_tmp = out; + + int32_t idx = 0; + + while (ch_per_loop > 0) + { + int32x4_t ip_0; + int32x4_t ip_1; + int32_t ker_loop = kernel_size / 3; + int32x4_t out_0 = vldrwq_s32(bias); + int32x4_t out_1 = out_0; + bias += 4; + + const int32_t offset = idx * 4; + const int8_t *row_0 = row + offset; + const int16_t *col_0 = col + offset; + const int16_t *col_1 = col + kernel_size * num_ch + offset; + + int32x4_t ker_0 = vldrbq_s32(row_0); + + while (ker_loop > 0) + { + const int8_t *row_1 = row_0 + num_ch; + const int8_t *row_2 = row_0 + 2 * num_ch; + const int32x4_t ker_1 = vldrbq_s32(row_1); + const int32x4_t ker_2 = vldrbq_s32(row_2); + + ip_0 = vldrhq_s32(col_0); + ip_1 = vldrhq_s32(col_1); + col_0 += num_ch; + col_1 += num_ch; + + out_0 += vmulq_s32(ip_0, ker_0); + out_1 += vmulq_s32(ip_1, ker_0); + + ip_0 = vldrhq_s32(col_0); + ip_1 = vldrhq_s32(col_1); + col_0 += num_ch; + col_1 += num_ch; + + out_0 += vmulq_s32(ip_0, ker_1); + out_1 += vmulq_s32(ip_1, ker_1); + + ip_0 = vldrhq_s32(col_0); + ip_1 = vldrhq_s32(col_1); + col_0 += num_ch; + col_1 += num_ch; + + out_0 += vmulq_s32(ip_0, ker_2); + out_1 += vmulq_s32(ip_1, ker_2); + row_0 += 3 * num_ch; + + ker_0 = vldrbq_s32(row_0); + ker_loop--; + } + + idx++; + /* Handle tail kernel elements */ + ker_loop = kernel_size - ((kernel_size / 3) * 3); + while (ker_loop > 0) + { + ip_0 = vldrhq_s32(col_0); + ip_1 = vldrhq_s32(col_1); + + out_0 += vmulq_s32(ip_0, ker_0); + out_1 += vmulq_s32(ip_1, ker_0); + + col_0 += num_ch; + col_1 += num_ch; + + ip_0 = vldrhq_s32(col_0); + ip_1 = vldrhq_s32(col_1); + + row_0 += num_ch; + ker_0 = vldrbq_s32(row_0); + ker_loop--; + } + const int32x4_t mult = vldrwq_s32(out_mult); + const int32x4_t shift = vldrwq_s32(out_shift); + out_mult += 4; + out_shift += 4; + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_1 = arm_requantize_mve_32x4(out_1, mult, shift); + + out_0 = vaddq_n_s32(out_0, out_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); + vstrbq_s32(out_tmp, out_0); + + out_1 = vaddq_n_s32(out_1, out_offset); + out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); + out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); + vstrbq_s32(out_tmp + num_ch, out_1); + + out_tmp += 4; + ch_per_loop--; + } + + int32_t tail_ch = num_ch & 3; + if (tail_ch != 0) + { + int32_t ch_idx = (num_ch & ~3); + int32x4_t col_0_sum; + int32x4_t col_1_sum; + + const int32_t single_buffer_size = kernel_size * num_ch; + for (int i = 0; i < 
tail_ch; i++) + { + const int16_t *col_pos_0 = col + ch_idx; + const int16_t *col_pos_1 = col_pos_0 + single_buffer_size; + + const int8_t *row_pos = row + ch_idx; + int32_t sum_0 = bias[i]; + int32_t sum_1 = bias[i]; + + for (int j = 0; j < kernel_size; j++) + { + const int8_t row_val = row_pos[j * num_ch]; + sum_0 += row_val * col_pos_0[j * num_ch]; + sum_1 += row_val * col_pos_1[j * num_ch]; + } + col_0_sum[i] = sum_0; + col_1_sum[i] = sum_1; + + ch_idx++; + } + const mve_pred16_t p = vctp32q(tail_ch); + const int32x4_t mult = vldrwq_z_s32(out_mult, p); + const int32x4_t shift = vldrwq_z_s32(out_shift, p); + + col_0_sum = arm_requantize_mve_32x4(col_0_sum, mult, shift); + col_1_sum = arm_requantize_mve_32x4(col_1_sum, mult, shift); + + col_0_sum = vaddq_n_s32(col_0_sum, out_offset); + col_0_sum = vmaxq_s32(col_0_sum, vdupq_n_s32(activation_min)); + col_0_sum = vminq_s32(col_0_sum, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out_tmp, col_0_sum, p); + + col_1_sum = vaddq_n_s32(col_1_sum, out_offset); + col_1_sum = vmaxq_s32(col_1_sum, vdupq_n_s32(activation_min)); + col_1_sum = vminq_s32(col_1_sum, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out_tmp + num_ch, col_1_sum, p); + + out_tmp += tail_ch; + } + + return out_tmp + num_ch; +#else + (void)row; + (void)col; + (void)num_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)kernel_size; + (void)output_bias; + (void)out; + return NULL; +#endif +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c new file mode 100755 index 00000000000..f03e5ff35c9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_q7_q15.c + * Description: Matrix-multiplication function for convolution + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + + /** + * @brief Matrix-multiplication function for convolution. + * + * @details Refer to header file for details. 
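+ *
+ *              In outline: the kernel multiplies every row of the weight matrix A (one row
+ *              per output channel, numCol_A entries each) with the two adjacent im2col
+ *              columns held in pInBuffer, so each call produces the outputs of two spatial
+ *              positions and returns pOut advanced by 2 * ch_im_out.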
+ * + */ + +q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t * pA, + const q15_t * pInBuffer, + const uint16_t ch_im_out, + const uint16_t numCol_A, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut) +{ +#if defined (ARM_MATH_DSP) + /* set up the second output pointers */ + q7_t *pOut2 = pOut + ch_im_out; + const q7_t *pBias = bias; + + uint16_t rowCnt = ch_im_out >> 1; + /* this loop over rows in A */ + while (rowCnt) + { + /* setup pointers for B */ + const q15_t *pB = pInBuffer; + const q15_t *pB2 = pB + numCol_A; + + /* align the second pointer for A */ + const q7_t *pA2 = pA + numCol_A; + + /* init the sum with bias */ + q31_t sum = ((q31_t)(*pBias) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = numCol_A >> 2; + /* accumulate over the vector */ + while (colCnt) + { + q31_t inA11, inA12, inA21, inA22; + + q31_t inB1 = arm_nn_read_q15x2_ia(&pB); + q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); + + pA = read_and_pad(pA, &inA11, &inA12); + pA2 = read_and_pad(pA2, &inA21, &inA22); + + sum = __SMLAD(inA11, inB1, sum); + sum2 = __SMLAD(inA11, inB2, sum2); + sum3 = __SMLAD(inA21, inB1, sum3); + sum4 = __SMLAD(inA21, inB2, sum4); + + inB1 = arm_nn_read_q15x2_ia(&pB); + inB2 = arm_nn_read_q15x2_ia(&pB2); + + sum = __SMLAD(inA12, inB1, sum); + sum2 = __SMLAD(inA12, inB2, sum2); + sum3 = __SMLAD(inA22, inB1, sum3); + sum4 = __SMLAD(inA22, inB2, sum4); + + colCnt--; + } /* while over colCnt */ + colCnt = numCol_A & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + q7_t inA2 = *pA2++; + q15_t inB2 = *pB2++; + + sum += inA1 * inB1; + sum2 += inA1 * inB2; + sum3 += inA2 * inB1; + sum4 += inA2 * inB2; + colCnt--; + } /* while over colCnt */ + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); + *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); + *pOut2++ = (q7_t) __SSAT((sum4 >> out_shift), 8); + + /* skip the row computed with A2 */ + pA += numCol_A; + rowCnt--; + } /* for over ch_im_out */ + + /* compute left-over row if any */ + if (ch_im_out & 0x1) + { + /* setup pointers for B */ + const q15_t *pB = pInBuffer; + const q15_t *pB2 = pB + numCol_A; + + /* load the bias */ + q31_t sum = ((q31_t)(*pBias) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = numCol_A >> 2; + while (colCnt) + { + q31_t inA11, inA12; + + q31_t inB1 = arm_nn_read_q15x2_ia(&pB); + q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); + + pA = read_and_pad(pA, &inA11, &inA12); + + sum = __SMLAD(inA11, inB1, sum); + sum2 = __SMLAD(inA11, inB2, sum2); + + inB1 = arm_nn_read_q15x2_ia(&pB); + inB2 = arm_nn_read_q15x2_ia(&pB2); + + sum = __SMLAD(inA12, inB1, sum); + sum2 = __SMLAD(inA12, inB2, sum2); + + colCnt--; + } + colCnt = numCol_A & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + q15_t inB2 = *pB2++; + + sum += inA1 * inB1; + sum2 += inA1 * inB2; + colCnt--; + } + + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); + } + + pOut += ch_im_out; + + /* return the new output pointer with offset */ + return pOut; +#else + /* To be completed */ + return NULL; +#endif /* ARM_MATH_DSP */ + +} diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c new file mode 100755 index 00000000000..31aa334c8ef --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_q7_q15_reordered.c + * Description: Matrix-multiplication function for convolution with reordered columns + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" +#include "arm_math.h" + + /** + * @brief Matrix-multiplication function for convolution with re-ordered input. + * + * @details Refer to header file for details. 
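+ *
+ *              In outline: this variant matches arm_nn_mat_mult_kernel_q7_q15() except that
+ *              the q15 columns in pInBuffer are expected in the interleaved order produced
+ *              by SXTB16-based expansion (e.g. by arm_q7_to_q15_reordered_no_shift), which
+ *              lets read_and_pad_reordered() skip re-arranging the expanded weights. The
+ *              row loop below also handles output channels strictly in pairs, i.e. an even
+ *              ch_im_out is assumed.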
+ * + */ + +q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA, + const q15_t * pInBuffer, + const uint16_t ch_im_out, + const uint16_t numCol_A, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut) +{ + +#if defined (ARM_MATH_DSP) + /* set up the second output pointers */ + q7_t *pOut2 = pOut + ch_im_out; + int i; + + /* this loop over rows in A */ + for (i = 0; i < ch_im_out; i += 2) + { + /* setup pointers for B */ + const q15_t *pB = pInBuffer; + const q15_t *pB2 = pB + numCol_A; + + /* align the second pointer for A */ + const q7_t *pA2 = pA + numCol_A; + + /* init the sum with bias */ + q31_t sum = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = numCol_A >> 2; + /* accumulate over the vector */ + while (colCnt) + { + q31_t inA11, inA12, inA21, inA22; + + q31_t inB1 = arm_nn_read_q15x2_ia(&pB); + q31_t inB2 = arm_nn_read_q15x2_ia(&pB2); + + pA = read_and_pad_reordered(pA, &inA11, &inA12); + pA2 = read_and_pad_reordered(pA2, &inA21, &inA22); + + sum = __SMLAD(inA11, inB1, sum); + sum2 = __SMLAD(inA11, inB2, sum2); + sum3 = __SMLAD(inA21, inB1, sum3); + sum4 = __SMLAD(inA21, inB2, sum4); + + inB1 = arm_nn_read_q15x2_ia(&pB); + inB2 = arm_nn_read_q15x2_ia(&pB2); + + sum = __SMLAD(inA12, inB1, sum); + sum2 = __SMLAD(inA12, inB2, sum2); + sum3 = __SMLAD(inA22, inB1, sum3); + sum4 = __SMLAD(inA22, inB2, sum4); + + colCnt--; + } /* while over colCnt */ + colCnt = numCol_A & 0x3; + while (colCnt) + { + q7_t inA1 = *pA++; + q15_t inB1 = *pB++; + q7_t inA2 = *pA2++; + q15_t inB2 = *pB2++; + + sum += inA1 * inB1; + sum2 += inA1 * inB2; + sum3 += inA2 * inB1; + sum4 += inA2 * inB2; + colCnt--; + } /* while over colCnt */ + *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); + *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); + *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); + *pOut2++ = (q7_t) __SSAT((sum4 >> out_shift), 8); + + /* skip the row computed with A2 */ + pA += numCol_A; + } /* for over ch_im_out */ + + pOut += ch_im_out; + + /* return the new output pointer with offset */ + return pOut; +#else + /* To be completed */ + return NULL; +#endif /* ARM_MATH_DSP */ +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c new file mode 100755 index 00000000000..8980187e6f5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c @@ -0,0 +1,391 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_s8_s16.c + * Description: Matrix-multiplication function for convolution + * + * $Date: February 27, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/* + * Matrix-multiplication function for convolution with per-channel requantization. + * + * Refer header file for details. + * + */ + +q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, + const q15_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t num_col_a, + const int32_t *const output_bias, + q7_t *out_0) +{ +#if defined(ARM_MATH_MVEI) +#define ROW_PER_LOOP (4) +#define COL_PER_LOOP (8) + + const q7_t *ip_a0_s8 = input_a; + q7_t *out_1 = out_0 + output_ch; + + const int32_t *bias = output_bias; + + int32_t row_count = output_ch / ROW_PER_LOOP; + + while (row_count) + { + const q15_t *ip_b0_s16 = input_b; + const q15_t *ip_b1_s16 = input_b + num_col_a; + + const q7_t *ip_a1_s8 = ip_a0_s8 + num_col_a; + const q7_t *ip_a2_s8 = ip_a0_s8 + num_col_a * 2; + const q7_t *ip_a3_s8 = ip_a0_s8 + num_col_a * 3; + + q31_t ch_0_out_n = bias[0]; + q31_t ch_1_out_n = bias[1]; + q31_t ch_2_out_n = bias[2]; + q31_t ch_3_out_n = bias[3]; + + q31_t ch_0_out_n1 = ch_0_out_n; + q31_t ch_1_out_n1 = ch_1_out_n; + q31_t ch_2_out_n1 = ch_2_out_n; + q31_t ch_3_out_n1 = ch_3_out_n; + bias += 4; + + int32_t col_count = num_col_a / COL_PER_LOOP; + + while (col_count) + { + // Load inputs + const int16x8_t ip_b0 = vld1q_s16(ip_b0_s16); + ip_b0_s16 += COL_PER_LOOP; + const int16x8_t ip_b1 = vld1q_s16(ip_b1_s16); + ip_b1_s16 += COL_PER_LOOP; + + // Load filters + const int16x8_t ip_a0 = vldrbq_s16(ip_a0_s8); + ip_a0_s8 += COL_PER_LOOP; + const int16x8_t ip_a1 = vldrbq_s16(ip_a1_s8); + ip_a1_s8 += COL_PER_LOOP; + const int16x8_t ip_a2 = vldrbq_s16(ip_a2_s8); + ip_a2_s8 += COL_PER_LOOP; + const int16x8_t ip_a3 = vldrbq_s16(ip_a3_s8); + ip_a3_s8 += COL_PER_LOOP; + + // MAC + ch_0_out_n += vmladavq_s16(ip_b0, ip_a0); + ch_1_out_n += vmladavq_s16(ip_b0, ip_a1); + ch_2_out_n += vmladavq_s16(ip_b0, ip_a2); + ch_3_out_n += vmladavq_s16(ip_b0, ip_a3); + ch_0_out_n1 += vmladavq_s16(ip_b1, ip_a0); + ch_1_out_n1 += vmladavq_s16(ip_b1, ip_a1); + ch_2_out_n1 += vmladavq_s16(ip_b1, ip_a2); + ch_3_out_n1 += vmladavq_s16(ip_b1, ip_a3); + + col_count--; + } + + /* Handle tail */ + col_count = (num_col_a & (COL_PER_LOOP - 1)) - 1; + while (col_count >= 0) + { + const int32_t b0 = ip_b0_s16[col_count]; + const int32_t b1 = ip_b1_s16[col_count]; + + ch_0_out_n += b0 * ip_a0_s8[col_count]; + ch_1_out_n += b0 * ip_a1_s8[col_count]; + ch_2_out_n += b0 * ip_a2_s8[col_count]; + ch_3_out_n += b0 * ip_a3_s8[col_count]; + + ch_0_out_n1 += b1 * ip_a0_s8[col_count]; + ch_1_out_n1 += b1 * ip_a1_s8[col_count]; + ch_2_out_n1 += b1 * ip_a2_s8[col_count]; + ch_3_out_n1 += b1 * ip_a3_s8[col_count]; + col_count--; + } + ip_a0_s8 += (num_col_a & (COL_PER_LOOP - 1)); + + int32x4_t out_vec_0; + int32x4_t out_vec_1; + out_vec_0[0] = ch_0_out_n; + out_vec_0[1] = ch_1_out_n; + out_vec_0[2] = ch_2_out_n; + out_vec_0[3] = ch_3_out_n; + + out_vec_1[0] = 
ch_0_out_n1; + out_vec_1[1] = ch_1_out_n1; + out_vec_1[2] = ch_2_out_n1; + out_vec_1[3] = ch_3_out_n1; + + int32x4_t mult = vldrwq_s32(out_mult); + int32x4_t shift = vldrwq_s32(out_shift); + out_mult += ROW_PER_LOOP; + out_shift += ROW_PER_LOOP; + + out_vec_0 = arm_requantize_mve_32x4(out_vec_0, mult, shift); + out_vec_1 = arm_requantize_mve_32x4(out_vec_1, mult, shift); + + out_vec_0 = vaddq_n_s32(out_vec_0, out_offset); + out_vec_0 = vmaxq_s32(out_vec_0, vdupq_n_s32(activation_min)); + out_vec_0 = vminq_s32(out_vec_0, vdupq_n_s32(activation_max)); + vstrbq_s32(out_0, out_vec_0); + out_0 += ROW_PER_LOOP; + + out_vec_1 = vaddq_n_s32(out_vec_1, out_offset); + out_vec_1 = vmaxq_s32(out_vec_1, vdupq_n_s32(activation_min)); + out_vec_1 = vminq_s32(out_vec_1, vdupq_n_s32(activation_max)); + vstrbq_s32(out_1, out_vec_1); + out_1 += ROW_PER_LOOP; + row_count--; + ip_a0_s8 += (num_col_a * 3); + } + + row_count = output_ch & (ROW_PER_LOOP - 1); + + if (row_count) + { + ip_a0_s8 = input_a + num_col_a * (output_ch & ~3); + const mve_pred16_t p = vctp32q(row_count); + int32x4_t out_vec_0 = vdupq_n_s32(0); + int32x4_t out_vec_1 = vdupq_n_s32(0); + int32x4_t mult_tail; + int32x4_t shift_tail; + + for (int i_ch = 0; i_ch < row_count; i_ch++) + { + int32_t output_0 = bias[i_ch]; + int32_t output_1 = bias[i_ch]; + const q15_t *ip_b0_s16 = input_b; + const q15_t *ip_b1_s16 = input_b + num_col_a; + + for (int i_idx = 0; i_idx < num_col_a; i_idx++) + { + output_0 += ip_b0_s16[i_idx] * ip_a0_s8[i_idx]; + output_1 += ip_b1_s16[i_idx] * ip_a0_s8[i_idx]; + } + + ip_a0_s8 += num_col_a; + out_vec_0[i_ch] = output_0; + out_vec_1[i_ch] = output_1; + mult_tail[i_ch] = out_mult[i_ch]; + shift_tail[i_ch] = out_shift[i_ch]; + } + out_vec_0 = arm_requantize_mve_32x4(out_vec_0, mult_tail, shift_tail); + out_vec_1 = arm_requantize_mve_32x4(out_vec_1, mult_tail, shift_tail); + + out_vec_0 = vaddq_n_s32(out_vec_0, out_offset); + out_vec_0 = vmaxq_s32(out_vec_0, vdupq_n_s32(activation_min)); + out_vec_0 = vminq_s32(out_vec_0, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out_0, out_vec_0, p); + + out_vec_1 = vaddq_n_s32(out_vec_1, out_offset); + out_vec_1 = vmaxq_s32(out_vec_1, vdupq_n_s32(activation_min)); + out_vec_1 = vminq_s32(out_vec_1, vdupq_n_s32(activation_max)); + + vstrbq_p_s32(out_1, out_vec_1, p); + out_1 += row_count; + } + + return out_1; + +#elif defined(ARM_MATH_DSP) + /* set up the second output pointers */ + q7_t *out_1 = out_0 + output_ch; + const int32_t *bias = output_bias; + + uint16_t row_count = output_ch / 2; + const q7_t *ip_a0 = input_a; + /* this loop over rows in A */ + while (row_count) + { + /* setup pointers for B */ + const q15_t *ip_b0 = input_b; + const q15_t *ip_b1 = ip_b0 + num_col_a; + + /* align the second pointer for A */ + const q7_t *ip_a1 = ip_a0 + num_col_a; + + /* Init accumulator with bias for channel N and N + 1 */ + q31_t ch_0_out_0 = *bias; + q31_t ch_0_out_1 = *bias++; + q31_t ch_1_out_0 = *bias; + q31_t ch_1_out_1 = *bias++; + + uint16_t col_count = num_col_a / 4; + /* accumulate over the vector */ + while (col_count) + { + q31_t a01, a02, a11, a12; + q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad(ip_a0, &a01, &a02); + ip_a1 = read_and_pad(ip_a1, &a11, &a12); + + ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + 
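+            /* Second pass: the second expanded weight pair (a02/a12) is accumulated
+               against the next two q15 values just read from each input column. */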
ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + col_count = num_col_a & 0x3; + while (col_count) + { + q7_t a0 = *ip_a0++; + q15_t b0 = *ip_b0++; + q7_t a1 = *ip_a1++; + q15_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + ch_1_out_0 += a1 * b0; + ch_1_out_1 += a1 * b1; + col_count--; + } /* while over col_count */ + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (q7_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (q7_t)ch_0_out_1; + out_mult++; + out_shift++; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_0 += out_offset; + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (q7_t)ch_1_out_0; + + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + ch_1_out_1 += out_offset; + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (q7_t)ch_1_out_1; + out_mult++; + out_shift++; + + /* skip row */ + ip_a0 += num_col_a; + row_count--; + } + + /* compute the last odd numbered row if any */ + if (output_ch & 0x1) + { + /* setup pointers for B */ + const q15_t *ip_b0 = input_b; + const q15_t *ip_b1 = ip_b0 + num_col_a; + + /* load the bias */ + q31_t ch_0_out_0 = *bias; + q31_t ch_0_out_1 = *bias++; + + uint16_t col_count = num_col_a >> 2; + while (col_count) + { + q31_t a01, a02; + q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad(ip_a0, &a01, &a02); + + ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); + + col_count--; + } + col_count = num_col_a & 0x3; + while (col_count) + { + q7_t a0 = *ip_a0++; + q15_t b0 = *ip_b0++; + q15_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + col_count--; + } + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (q7_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (q7_t)ch_0_out_1; + out_mult++; + out_shift++; + } + + out_0 += output_ch; + + /* return the new output pointer with offset */ + return out_0; +#else + (void)input_a; + (void)input_b; + (void)output_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)num_col_a; + (void)output_bias; + (void)out_0; + /* To be completed */ + return NULL; +#endif +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c new file mode 100755 index 00000000000..a25b1ffe4ed --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_s8_s16_reordered.c + * Description: Matrix-multiplication function for convolution with reordered columns + * + * $Date: February 27, 2020 + * $Revision: V.1.0.2 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" +#include "arm_math.h" + +/* + * Matrix-multiplication with re-ordered input and bias inputs for convolution with per-channel + * requantization. The re-ordering is a consequence of sign extension is done by the SXTB16 command. + * + * Refer header file for details. This function differs from arm_nn_mat_mult_kernel_s8_s16(), in that it uses + * read_and_pad_reordered() instead of arm_nn_mat_mult_kernel_s8_s16(). Investigating the cycles impact and + * unifying these two functions is a potential future improvement. 
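For reference, a plain-C sketch of the byte pairing that the SXTB16-based widening produces; the helper name and types below are illustrative and are not part of the CMSIS-NN API. Four packed int8 weights come out as the lane pairs (w0, w2) and (w1, w3), which is why the weight and activation streams are shuffled offline: as long as both operands of each SMLAD use the same permutation, the dot-product result is unchanged.

#include <stdint.h>

/* Illustrative model only: shows which bytes of a packed q7x4 word end up in
 * which 16-bit lane after the SXTB16 / SXTB16(ROR #8) expansion used here. */
static void sxtb16_lanes(const int8_t w[4], int16_t even_lane[2], int16_t odd_lane[2])
{
    even_lane[0] = (int16_t)w[0];  /* SXTB16(word)         -> bytes 0 and 2 */
    even_lane[1] = (int16_t)w[2];
    odd_lane[0]  = (int16_t)w[1];  /* SXTB16(ROR(word, 8)) -> bytes 1 and 3 */
    odd_lane[1]  = (int16_t)w[3];
}

/* Each __SMLAD(x, y, acc) then contributes acc += x[0]*y[0] + x[1]*y[1]. */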
+ * + */ + +q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a, + const q15_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t num_col_a, + const int32_t *const output_bias, + q7_t *out_0) +{ +#if defined(ARM_MATH_DSP) + /* set up the second output pointers */ + q7_t *out_1 = out_0 + output_ch; + const int32_t *bias = output_bias; + + uint16_t row_count = output_ch / 2; + const q7_t *ip_a0 = input_a; + /* this loop over rows in A */ + while (row_count) + { + /* setup pointers for B */ + const q15_t *ip_b0 = input_b; + const q15_t *ip_b1 = ip_b0 + num_col_a; + + /* align the second pointer for A */ + const q7_t *ip_a1 = ip_a0 + num_col_a; + + /* Init accumulator with bias for channel N and N + 1 */ + q31_t ch_0_out_0 = *bias; + q31_t ch_0_out_1 = *bias++; + q31_t ch_1_out_0 = *bias; + q31_t ch_1_out_1 = *bias++; + + uint16_t col_count = num_col_a / 4; + /* accumulate over the vector */ + while (col_count) + { + q31_t a01, a02, a11, a12; + q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); + ip_a1 = read_and_pad_reordered(ip_a1, &a11, &a12); + + ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (q7_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (q7_t)ch_0_out_1; + out_mult++; + out_shift++; + + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_0 += out_offset; + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (q7_t)ch_1_out_0; + + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + ch_1_out_1 += out_offset; + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (q7_t)ch_1_out_1; + out_mult++; + out_shift++; + + /* skip row */ + ip_a0 += num_col_a; + row_count--; + } + + if (output_ch & 1) + { + /* setup pointers for B */ + const q15_t *ip_b0 = input_b; + const q15_t *ip_b1 = ip_b0 + num_col_a; + + /* Init accumulator with bias for channel N + 1 */ + q31_t ch_0_out_0 = *bias; + q31_t ch_0_out_1 = ch_0_out_0; + + int32_t col_count = num_col_a / 4; + while (col_count) + { + q31_t a01, a02; + q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); + + ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); + + 
col_count--; + } /* while over col_count */ + + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_0 += out_offset; + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (q7_t)ch_0_out_0; + + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + ch_0_out_1 += out_offset; + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (q7_t)ch_0_out_1; + } + + out_0 += output_ch; + + /* return the new output pointer with offset */ + return out_0; +#else + (void)input_a; + (void)input_b; + (void)output_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)num_col_a; + (void)output_bias; + (void)out_0; + /* To be completed */ + return NULL; +#endif +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_s8.c new file mode 100755 index 00000000000..5afc5e90bf2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/ConvolutionFunctions/arm_nn_mat_mult_s8.c @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_s8.c + * Description: General Matrix-multiplication function + * + * $Date: March 6, 2020 + * $Revision: V.2.0.2 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/* + * s8 General matrix multiplication function with per-channel requantization for upto 4 column batches. + * + * Refer header file for details. 
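As a rough scalar model of the output stage this kernel applies to every accumulator: scale by the per-channel Q31 multiplier and shift, add the output offset, clamp to the activation range, and narrow to int8. The helper below is a sketch only; it uses a plain 64-bit rounded multiply, so it approximates rather than bit-exactly reproduces the library's doubling-high-multiply rounding.

#include <stdint.h>

/* Sketch of per-channel requantization: acc is the int32 dot product plus bias,
 * mult is a Q31 fixed-point multiplier, shift moves the binary point.
 * Assumes arithmetic right shift of negative values, as the kernels do. */
static int8_t requantize_to_s8(int32_t acc, int32_t mult, int32_t shift,
                               int32_t out_offset, int32_t act_min, int32_t act_max)
{
    int64_t v = ((int64_t)acc * mult + (1LL << 30)) >> 31;   /* rounded Q31 scale   */
    if (shift > 0) {
        v *= (int64_t)1 << shift;                            /* left shift          */
    } else if (shift < 0) {
        v = (v + ((int64_t)1 << (-shift - 1))) >> -shift;    /* rounded right shift */
    }
    int32_t out = (int32_t)v + out_offset;                   /* output zero-point   */
    if (out < act_min) { out = act_min; }
    if (out > act_max) { out = act_max; }                    /* activation clamp    */
    return (int8_t)out;
}

In the kernel this sequence runs once per output channel, with output_mult[i_out_ch] and output_shift[i_out_ch] selecting that channel's scale.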
+ * + */ + +q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, + const q7_t *input_col, + const uint16_t output_ch, + const uint16_t col_batches, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t out_offset, + const int32_t col_offset, + const int32_t row_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t row_len, + const int32_t *const bias, + q7_t *out) +{ +#if defined(ARM_MATH_MVEI) + (void)row_offset; + if (col_batches == 4) + { + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t row_len_tmp = row_len; + const int8_t *ip_r0 = input_row + (i_out_ch * row_len); + const int8_t *ip_c0 = input_col; + const int8_t *ip_c1 = input_col + row_len; + const int8_t *ip_c2 = input_col + (2 * row_len); + const int8_t *ip_c3 = input_col + (3 * row_len); + + int32_t acc_0 = bias[i_out_ch]; + int32_t acc_1 = bias[i_out_ch]; + int32_t acc_2 = bias[i_out_ch]; + int32_t acc_3 = bias[i_out_ch]; + const int32_t row_loop_cnt = (row_len + 7) / 8; + + for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) + { + mve_pred16_t p = vctp16q(row_len_tmp); + const int16x8_t offset = vdupq_m_n_s16(vuninitializedq_s16(), col_offset, p); + row_len_tmp -= 8; + + int16x8_t r0 = vldrbq_z_s16(ip_r0, p); + ip_r0 += 8; + + int16x8_t c0 = vldrbq_z_s16(ip_c0, p); + ip_c0 += 8; + c0 = vaddq_m_s16(vuninitializedq_s16(), c0, offset, p); + + int16x8_t c1 = vldrbq_z_s16(ip_c1, p); + ip_c1 += 8; + c1 = vaddq_m_s16(vuninitializedq_s16(), c1, offset, p); + + int16x8_t c2 = vldrbq_z_s16(ip_c2, p); + ip_c2 += 8; + c2 = vaddq_m_s16(vuninitializedq_s16(), c2, offset, p); + + int16x8_t c3 = vldrbq_z_s16(ip_c3, p); + ip_c3 += 8; + c3 = vaddq_m_s16(vuninitializedq_s16(), c3, offset, p); + + acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); + acc_1 = vmladavaq_p_s16(acc_1, r0, c1, p); + acc_2 = vmladavaq_p_s16(acc_2, r0, c2, p); + acc_3 = vmladavaq_p_s16(acc_3, r0, c3, p); + } + + int32x4_t res = {acc_0, acc_1, acc_2, acc_3}; + res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); + res = vaddq_n_s32(res, out_offset); + + res = vmaxq_s32(res, vdupq_n_s32(activation_min)); + res = vminq_s32(res, vdupq_n_s32(activation_max)); + + const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; + vstrbq_scatter_offset_s32(&out[i_out_ch], scatter_offset, res); + } + out += 4 * output_ch; + } + else + { + for (int i_col_batch = (col_batches & ~0x3); i_col_batch < (col_batches & 0x3); i_col_batch++) + { + for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + int32_t row_len_tmp = row_len; + + const int8_t *ip_r0 = input_row + (i_out_ch * row_len); + const int8_t *ip_c0 = input_col + (i_col_batch * row_len); + int32_t acc_0 = bias[i_out_ch]; + const int32_t row_loop_cnt = (row_len + 7) / 8; + + for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) + { + const mve_pred16_t p = vctp16q(row_len_tmp); + const int16x8_t offset = vdupq_m_n_s16(vuninitializedq_s16(), col_offset, p); + row_len_tmp -= 8; + + int16x8_t r0 = vldrbq_z_s16(ip_r0, p); + ip_r0 += 8; + int16x8_t c0 = vldrbq_z_s16(ip_c0, p); + ip_c0 += 8; + + c0 = vaddq_m_s16(vuninitializedq_s16(), c0, offset, p); + acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); + } + + acc_0 = arm_nn_requantize(acc_0, output_mult[i_out_ch], output_shift[i_out_ch]); + acc_0 += out_offset; + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + out[i_out_ch] = (q7_t)acc_0; + } + out += output_ch; + } + } + return out; + +#else + (void)input_row; + 
(void)input_col; + (void)output_ch; + (void)col_batches; + (void)output_shift; + (void)output_mult; + (void)out_offset; + (void)col_offset; + (void)row_offset; + (void)activation_min; + (void)activation_max; + (void)row_len; + (void)bias; + (void)out; + return NULL; +#endif +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/CMakeLists.txt new file mode 100755 index 00000000000..7a09c7474ca --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/CMakeLists.txt @@ -0,0 +1,19 @@ +cmake_minimum_required (VERSION 3.6) + +project(CMSISNNFullyConnected) + +include(configLib) + +file(GLOB SRC "./*_*.c") + +add_library(CMSISNNFullyConnected STATIC ${SRC}) + +configLib(CMSISNNFullyConnected ${ROOT}) +configDsp(CMSISNNFullyConnected ${ROOT}) + +### Includes +target_include_directories(CMSISNNFullyConnected PUBLIC "${NN}/Include") +target_include_directories(CMSISNNFullyConnected PUBLIC "${ROOT}/CMSIS/DSP/Include") + + + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c new file mode 100755 index 00000000000..ae2287c8d53 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c @@ -0,0 +1,199 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_mat_q7_vec_q15.c + * Description: Mixed Q15-Q7 fully-connected layer function + * + * $Date: 17. 
January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + + /** + * @brief Mixed Q15-Q7 fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * vec_buffer size: 0 + * + * Q7_Q15 version of the fully connected layer + * + * Weights are in q7_t and Activations are in q15_t + * + */ + +arm_status +arm_fully_connected_mat_q7_vec_q15(const q15_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q15_t * pOut, + q15_t * vec_buffer) +{ + (void)vec_buffer; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + const q7_t *pB = pM; + const q7_t *pB2; + q15_t *pO = pOut; + const q7_t *pBias = bias; + const q15_t *pA = pV; + + uint16_t rowCnt = num_of_rows >> 1; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + pB2 = pB + dim_vec; + + while (colCnt) + { + q31_t inV, inM11, inM12, inM21, inM22; + pB = read_and_pad(pB, &inM11, &inM12); + pB2 = read_and_pad(pB2, &inM21, &inM22); + + inV = arm_nn_read_q15x2_ia(&pA); + + sum = __SMLAD(inV, inM11, sum); + sum2 = __SMLAD(inV, inM21, sum2); + + inV = arm_nn_read_q15x2_ia(&pA); + + sum = __SMLAD(inV, inM12, sum); + sum2 = __SMLAD(inV, inM22, sum2); + + colCnt--; + } + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q7_t inM = *pB++; + q7_t inM2 = *pB2++; + + sum += inV * inM; + sum2 += inV * inM2; + colCnt--; + } /* while over colCnt */ + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum2 >> out_shift), 16)); + + /*adjust the pointers and counters */ + pB += dim_vec; + rowCnt--; + } + + /* left-over part of the rows */ + rowCnt = num_of_rows & 0x1; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + + while (colCnt) + { + q31_t inV1, inV2, inM11, inM12; + + pB = read_and_pad(pB, &inM11, &inM12); + + inV1 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV1, inM11, sum); + + inV2 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV2, inM12, sum); + + colCnt--; + } + + /* left-over of the vector */ + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q7_t inM = *pB++; + sum += inV * inM; + colCnt--; + } + + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + + rowCnt--; + } + +#else + int i, j; + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + for (i = 0; i < num_of_rows; i++) + { + int ip_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + for (j = 0; j < dim_vec; j++) + { + ip_out += pV[j] * pM[i * dim_vec + j]; + } + 
pOut[i] = (q15_t) __SSAT((ip_out >> out_shift), 16); + } + +#endif /* ARM_MATH_DSP */ + + /* Return to ARM_MATH_SUCCESS */ + return (ARM_MATH_SUCCESS); + +} + +/** + * @} end of FC group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c new file mode 100755 index 00000000000..b01f9e3792b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c @@ -0,0 +1,404 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_mat_q7_vec_q15_opt.c + * Description: Mixed Q15-Q7 opt fully-connected layer function + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + + /** + * @brief Mixed Q15-Q7 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * vec_buffer size: 0 + * + * Q7_Q15 version of the fully connected layer + * + * Weights are in q7_t and Activations are in q15_t + * + * Limitation: x4 version requires weight reordering to work + * + * Here we use only one pointer to read 4 rows in the weight + * matrix. So if the original q7_t matrix looks like this: + * + * | a11 | a12 | a13 | a14 | a15 | a16 | a17 | + * + * | a21 | a22 | a23 | a24 | a25 | a26 | a27 | + * + * | a31 | a32 | a33 | a34 | a35 | a36 | a37 | + * + * | a41 | a42 | a43 | a44 | a45 | a46 | a47 | + * + * | a51 | a52 | a53 | a54 | a55 | a56 | a57 | + * + * | a61 | a62 | a63 | a64 | a65 | a66 | a67 | + * + * We operates on multiple-of-4 rows, so the first four rows becomes + * + * | a11 | a21 | a12 | a22 | a31 | a41 | a32 | a42 | + * + * | a13 | a23 | a14 | a24 | a33 | a43 | a34 | a44 | + * + * | a15 | a25 | a16 | a26 | a35 | a45 | a36 | a46 | + * + * The column left over will be in-order. 
+ * which is: + * | a17 | a27 | a37 | a47 | + * + * For the left-over rows, we do 1x1 computation, so the data remains + * as its original order. + * + * So the stored weight matrix looks like this: + * + * | a11 | a21 | a12 | a22 | a31 | a41 | + * + * | a32 | a42 | a13 | a23 | a14 | a24 | + * + * | a33 | a43 | a34 | a44 | a15 | a25 | + * + * | a16 | a26 | a35 | a45 | a36 | a46 | + * + * | a17 | a27 | a37 | a47 | a51 | a52 | + * + * | a53 | a54 | a55 | a56 | a57 | a61 | + * + * | a62 | a63 | a64 | a65 | a66 | a67 | + * + */ + +arm_status +arm_fully_connected_mat_q7_vec_q15_opt(const q15_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, const q7_t * bias, q15_t * pOut, q15_t * vec_buffer) +{ + + (void)vec_buffer; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + const q7_t *pB = pM; + q15_t *pO = pOut; + const q7_t *pBias = bias; + const q15_t *pA = pV; + + uint16_t rowCnt = num_of_rows >> 2; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 1; + + pA = pV; + +#ifdef USE_INTRINSIC + +#ifndef ARM_MATH_BIG_ENDIAN + + while (colCnt) + { + q31_t inM11, inM12, inM13, inM14; + q31_t inV; + + inV = arm_nn_read_q15x2_ia(&pA); + inM11 = arm_nn_read_q7x4_ia(&pB); + inM12 = __SXTB16(__ROR(inM11, 8)); + inM11 = __SXTB16(inM11); + sum = __SMLAD(inM11, inV, sum); + sum2 = __SMLAD(inM12, inV, sum2); + inM13 = arm_nn_read_q7x4_ia(&pB); + inM14 = __SXTB16(__ROR(inM13, 8)); + inM13 = __SXTB16(inM13); + sum3 = __SMLAD(inM13, inV, sum3); + sum4 = __SMLAD(inM14, inV, sum4); + colCnt--; + } + +#else + + while (colCnt) + { + q31_t inM11, inM12, inM13, inM14; + q31_t inV; + + inV = *__SIMD32(pA)++; + inM11 = arm_nn_read_q7x4_ia(&pB); + inM12 = __SXTB16(__ROR(inM11, 8)); + inM11 = __SXTB16(inM11); + sum = __SMLAD(inM12, inV, sum); + sum2 = __SMLAD(inM11, inV, sum2); + inM13 = arm_nn_read_q7x4_ia(&pB); + inM14 = __SXTB16(__ROR(inM13, 8)); + inM13 = __SXTB16(inM13); + sum3 = __SMLAD(inM14, inV, sum3); + sum4 = __SMLAD(inM13, inV, sum4); + colCnt--; + } + +#endif /* ARM_MATH_BIG_ENDIAN */ + +#else + + /* + * register needed: + * loop counter: colCnt + * accumulators: sum, sum2, sum3, sum4 + * pointers: pB, pA + * weight data: inM11, inM12, inM13, inM14 + * activation data: inV + */ + +#ifndef ARM_MATH_BIG_ENDIAN + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #4\n" + "ldr.w r1, [%[pB]], #8\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r1, %[sum]\n" + "smlad %[sum2], r4, r0, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r3, %[sum3]\n" + "smlad %[sum4], r4, r2, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] "+r"(sum), + [sum2] "+r"(sum2),[sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); +#else + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #4\n" + "ldr.w r1, [%[pB]], #8\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r0, %[sum]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, 
r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] "+r"(sum), + [sum2] "+r"(sum2),[sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); +#endif /* ARM_MATH_BIG_ENDIAN */ + +#endif /* USE_INTRINSIC */ + + colCnt = dim_vec & 0x1; + while (colCnt) + { + q15_t inV = *pA++; + q7_t inM = *pB++; + q7_t inM2 = *pB++; + q7_t inM3 = *pB++; + q7_t inM4 = *pB++; + + sum += inV * inM; + sum2 += inV * inM2; + sum3 += inV * inM3; + sum4 += inV * inM4; + colCnt--; + } /* while over colCnt */ + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum2 >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum3 >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum4 >> out_shift), 16)); + + /* adjust the pointers and counters */ + rowCnt--; + } + + /* left-over part of the rows */ + rowCnt = num_of_rows & 0x3; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + + while (colCnt) + { + q31_t inV1, inV2, inM11, inM12; + + pB = read_and_pad(pB, &inM11, &inM12); + + inV1 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV1, inM11, sum); + + inV2 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV2, inM12, sum); + + colCnt--; + } + + /* left-over of the vector */ + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q7_t inM = *pB++; + sum += inV * inM; + colCnt--; + } + + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + + rowCnt--; + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + uint16_t rowCnt = num_of_rows >> 2; + const q7_t *pB = pM; + const q15_t *pA; + q15_t *pO = pOut; + const q7_t *pBias = bias; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = dim_vec >> 1; + + pA = pV; + + while (colCnt) + { + q15_t inA1 = *pA++; + q15_t inA2 = *pA++; + + q7_t inB1 = *pB++; + q7_t inB3 = *pB++; + q7_t inB2 = *pB++; + q7_t inB4 = *pB++; + + sum += inA1 * inB1 + inA2 * inB2; + sum2 += inA1 * inB3 + inA2 * inB4; + + inB1 = *pB++; + inB3 = *pB++; + inB2 = *pB++; + inB4 = *pB++; + + sum3 += inA1 * inB1 + inA2 * inB2; + sum4 += inA1 * inB3 + inA2 * inB4; + + colCnt--; + } + + colCnt = dim_vec & 0x1; + while (colCnt) + { + q15_t inA = *pA++; + q7_t inB = *pB++; + sum += inA * inB; + inB = *pB++; + sum2 += inA * inB; + inB = *pB++; + sum3 += inA * inB; + inB = *pB++; + sum4 += inA * inB; + + colCnt--; + } + *pO++ = (q15_t) __SSAT((sum >> out_shift), 16); + *pO++ = (q15_t) __SSAT((sum2 >> out_shift), 16); + *pO++ = (q15_t) __SSAT((sum3 >> out_shift), 16); + *pO++ = (q15_t) __SSAT((sum4 >> out_shift), 16); + + rowCnt--; + } + + rowCnt = num_of_rows & 0x3; + + while (rowCnt) + { + int ip_out = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + int j; + + pA = pV; + for (j = 0; j < dim_vec; j++) + { + q15_t inA = *pA++; + q7_t inB = *pB++; + ip_out += inA * inB; + } + *pO++ = (q15_t) __SSAT((ip_out >> out_shift), 16); + + rowCnt--; + } + +#endif /* ARM_MATH_DSP */ + + /* Return to ARM_MATH_SUCCESS */ + return (ARM_MATH_SUCCESS); + +} + +/** + * @} end of FC group + */ diff --git 
a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15.c new file mode 100755 index 00000000000..9a69a961df1 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_q15.c + * Description: Q15 basic fully-connected layer function + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + + /** + * @brief Q15 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + * + * @details + * + * Buffer size: + * + * vec_buffer size: 0 + * + */ + +arm_status +arm_fully_connected_q15(const q15_t * pV, + const q15_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t * bias, + q15_t * pOut, + q15_t * vec_buffer) +{ + (void)vec_buffer; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + const q15_t *pB = pM; + const q15_t *pB2 = pB + dim_vec; + q15_t *pO = pOut; + const q15_t *pA; + const q15_t *pBias = bias; + uint16_t rowCnt = num_of_rows >> 1; + + /* this loop loops over different output */ + while (rowCnt) { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + pB2 = pB + dim_vec; + + while (colCnt) + { + q31_t inV1, inM1, inM2; + inV1 = arm_nn_read_q15x2_ia(&pA); + inM1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inV1, inM1, sum); + inM2 = arm_nn_read_q15x2_ia(&pB2); + sum2 = __SMLAD(inV1, inM2, sum2); + + inV1 = arm_nn_read_q15x2_ia(&pA); + inM1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inV1, inM1, sum); + inM2 = arm_nn_read_q15x2_ia(&pB2); + sum2 = __SMLAD(inV1, inM2, sum2); + + 
colCnt--; + } + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q15_t inM = *pB++; + q15_t inM2 = *pB2++; + + sum += inV * inM; + sum2 += inV * inM2; + colCnt--; + } /* while over colCnt */ + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum2>> out_shift), 16)); + + /* adjust the pointers and counters */ + pB = pB + dim_vec; + rowCnt --; + } + + rowCnt = num_of_rows & 0x1; + + while (rowCnt) { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + + while (colCnt) { + q31_t inV1, inM1; + inV1 = arm_nn_read_q15x2_ia(&pA); + inM1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inV1, inM1, sum); + + inV1 = arm_nn_read_q15x2_ia(&pA); + inM1 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inV1, inM1, sum); + + colCnt--; + } + + /* left-over of the vector */ + colCnt = dim_vec & 0x3; + while(colCnt) { + q15_t inV = *pA++; + q15_t inM = *pB++; + + sum += inV * inM; + + colCnt--; + } + + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + + rowCnt --; + } + +#else + int i, j; + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + for (i = 0; i < num_of_rows; i++) + { + int ip_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + for (j = 0; j < dim_vec; j++) + { + ip_out += pV[j] * pM[i * dim_vec + j]; + } + pOut[i] = (q15_t) __SSAT((ip_out >> out_shift), 16); + } + +#endif /* ARM_MATH_DSP */ + + /* Return to application */ + return (ARM_MATH_SUCCESS); + +} + +/** + * @} end of FC group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15_opt.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15_opt.c new file mode 100755 index 00000000000..72a39dd967d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q15_opt.c @@ -0,0 +1,332 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_q15_opt.c + * Description: Q15 opt fully-connected layer function + * + * $Date: 17. 
January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + + /** + * @brief Q15 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + * + * @details + * + * Buffer size: + * + * vec_buffer size: 0 + * + * Here we use only one pointer to read 4 rows in the weight + * matrix. So if the original matrix looks like this: + * + * | a11 | a12 | a13 | + * + * | a21 | a22 | a23 | + * + * | a31 | a32 | a33 | + * + * | a41 | a42 | a43 | + * + * | a51 | a52 | a53 | + * + * | a61 | a62 | a63 | + * + * We operates on multiple-of-4 rows, so the first four rows becomes + * + * | a11 | a12 | a21 | a22 | a31 | a32 | a41 | a42 | + * + * | a13 | a23 | a33 | a43 | + * + * Remaining rows are kept the same original order. + * + * So the stored weight matrix looks like this: + * + * + * | a11 | a12 | a21 | a22 | a31 | a32 | a41 | a42 | + * + * | a13 | a23 | a33 | a43 | a51 | a52 | a53 | a61 | + * + * | a62 | a63 | + */ + +arm_status +arm_fully_connected_q15_opt(const q15_t * pV, + const q15_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t * bias, + q15_t * pOut, + q15_t * vec_buffer) +{ + (void)vec_buffer; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + const q15_t *pB = pM; + q15_t *pO = pOut; + const q15_t *pBias = bias; + const q15_t *pA = pV; + + uint16_t rowCnt = num_of_rows >> 2; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 1; + + pA = pV; + +#ifdef USE_INTRINSIC + + while (colCnt) + { + q31_t inM11, inM12, inM13, inM14; + q31_t inV; + + inV = arm_nn_read_q15x2_ia(&pA); + inM11 = arm_nn_read_q15x2_ia(&pB); + sum = __SMLAD(inV, inM11, sum); + inM12 = arm_nn_read_q15x2_ia(&pB); + sum2 = __SMLAD(inV, inM12, sum2); + inM13 = arm_nn_read_q15x2_ia(&pB); + sum3 = __SMLAD(inV, inM13, sum3); + inM14 = arm_nn_read_q15x2_ia(&pB); + sum4 = __SMLAD(inV, inM14, sum4); + colCnt--; + } + +#else + + /* + * register needed: + * loop counter: colCnt + * accumulators: sum, sum2, sum3, sum4 + * pointers: pB, pA + * weight data: inM11, inM12, inM13, inM14 + * activation data: inV + */ + + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #4\n" + "ldr.w r0, [%[pB]], #16\n" + "smlad %[sum], r4, r0, %[sum]\n" + "ldr.w r1, [%[pB] , #-12]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r2, [%[pB] , #-8]\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "ldr.w r3, [%[pB] , #-4]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] "+r"(sum), + [sum2] "+r"(sum2),[sum3] "+r"(sum3), 
+ [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); + +#endif /* USE_INTRINSIC */ + + colCnt = dim_vec & 0x1; + while (colCnt) + { + + q15_t inV = *pA++; + q15_t inM = *pB++; + q15_t inM2 = *pB++; + q15_t inM3 = *pB++; + q15_t inM4 = *pB++; + + sum += inV * inM; + sum2 += inV * inM2; + sum3 += inV * inM3; + sum4 += inV * inM4; + colCnt--; + } /* while over colCnt */ + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum2 >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum3 >> out_shift), 16)); + *pO++ = (q15_t) (__SSAT((sum4 >> out_shift), 16)); + + /* adjust the pointers and counters */ + rowCnt--; + } + + /* left-over part of the rows */ + rowCnt = num_of_rows & 0x3; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + + while (colCnt) + { + q31_t inV1, inV2, inM1, inM2; + + inM1 = arm_nn_read_q15x2_ia(&pB); + inV1 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV1, inM1, sum); + + inM2 = arm_nn_read_q15x2_ia(&pB); + inV2 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV2, inM2, sum); + + colCnt--; + } + + /* left-over of the vector */ + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q15_t inM = *pB++; + sum += inV * inM; + colCnt--; + } + + *pO++ = (q15_t) (__SSAT((sum >> out_shift), 16)); + + rowCnt--; + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + uint16_t rowCnt = num_of_rows >> 2; + const q15_t *pB = pM; + const q15_t *pA; + q15_t *pO = pOut; + const q15_t *pBias = bias; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 1; + + pA = pV; + while (colCnt) + { + q15_t inA1 = *pA++; + q15_t inA2 = *pA++; + + q15_t inB1 = *pB++; + q15_t inB2 = *pB++; + sum += inA1 * inB1 + inA2 * inB2; + + inB1 = *pB++; + inB2 = *pB++; + sum2 += inA1 * inB1 + inA2 * inB2; + + inB1 = *pB++; + inB2 = *pB++; + sum3 += inA1 * inB1 + inA2 * inB2; + + inB1 = *pB++; + inB2 = *pB++; + sum4 += inA1 * inB1 + inA2 * inB2; + + colCnt--; + } + colCnt = dim_vec & 0x1; + while (colCnt) + { + q15_t inA = *pA++; + q15_t inB = *pB++; + sum += inA * inB; + inB = *pB++; + sum2 += inA * inB; + inB = *pB++; + sum3 += inA * inB; + inB = *pB++; + sum4 += inA * inB; + colCnt--; + } + *pO++ = (q15_t) __SSAT((sum >> out_shift), 16); + *pO++ = (q15_t) __SSAT((sum2 >> out_shift), 16); + *pO++ = (q15_t) __SSAT((sum3 >> out_shift), 16); + *pO++ = (q15_t) __SSAT((sum4 >> out_shift), 16); + + rowCnt--; + } + rowCnt = num_of_rows & 0x3; + + while (rowCnt) + { + int ip_out = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + int j; + + pA = pV; + for (j = 0; j < dim_vec; j++) + { + q15_t inA = *pA++; + q15_t inB = *pB++; + ip_out += inA * inB; + } + *pO++ = (q15_t) __SSAT((ip_out >> out_shift), 16); + + rowCnt--; + } + +#endif /* ARM_MATH_DSP */ + + /* Return to ARM_MATH_SUCCESS */ + return (ARM_MATH_SUCCESS); + +} + +/** + * @} end of FC group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7.c 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7.c new file mode 100755 index 00000000000..8e499bca2e8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7.c @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_q7.c + * Description: Q7 basic fully-connected layer function + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + + /** + * @brief Q7 basic fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * vec_buffer size: dim_vec + * + * This basic function is designed to work with regular weight + * matrix without interleaving. 
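The bias_shift / out_shift parameters implement a simple fixed-point rescale around the integer dot product; the plain-C sketch below spells out the per-output arithmetic. NN_ROUND and __SSAT are modelled here rather than taken from the library headers, and the helper name is illustrative.

#include <stdint.h>

/* One q7 output: bias promoted to the accumulator scale, rounding constant for
 * the final shift, dot product added, then shifted down and saturated to q7. */
static int8_t q7_fc_output(int32_t dot, int8_t bias, uint16_t bias_shift, uint16_t out_shift)
{
    int32_t acc = (int32_t)bias << bias_shift;               /* bias at accumulator scale  */
    acc += (out_shift > 0) ? (1 << (out_shift - 1)) : 0;     /* NN_ROUND: round to nearest */
    acc += dot;                                              /* sum of pV[j] * pM[i][j]    */
    acc >>= out_shift;                                       /* back to the q7 scale       */
    if (acc > 127)  { acc = 127;  }                          /* __SSAT(acc, 8)             */
    if (acc < -128) { acc = -128; }
    return (int8_t)acc;
}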
+ * + */ + +arm_status +arm_fully_connected_q7(const q7_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, const q7_t * bias, q7_t * pOut, q15_t * vec_buffer) +{ + +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + const q7_t *pB = pM; + const q7_t *pB2; + q7_t *pO = pOut; + const q7_t *pBias = bias; + const q15_t *pA; + uint16_t rowCnt = num_of_rows >> 1; + + /* expand the vector into the buffer */ + arm_q7_to_q15_reordered_no_shift(pV, vec_buffer, dim_vec); + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = dim_vec >> 2; + + pA = vec_buffer; + pB2 = pB + dim_vec; + + while (colCnt) + { + q31_t inV, inM11, inM12, inM21, inM22; + pB = read_and_pad_reordered(pB, &inM11, &inM12); + pB2 = read_and_pad_reordered(pB2, &inM21, &inM22); + + inV = arm_nn_read_q15x2_ia(&pA); + + sum = __SMLAD(inV, inM11, sum); + sum2 = __SMLAD(inV, inM21, sum2); + + inV = arm_nn_read_q15x2_ia(&pA); + + sum = __SMLAD(inV, inM12, sum); + sum2 = __SMLAD(inV, inM22, sum2); + + colCnt--; + } + colCnt = dim_vec & 0x3; + while (colCnt) + { + q7_t inV = *pA++; + q15_t inM = *pB++; + q15_t inM2 = *pB2++; + + sum += inV * inM; + sum2 += inV * inM2; + colCnt--; + } /* while over colCnt */ + *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); + *pO++ = (q7_t) (__SSAT((sum2 >> out_shift), 8)); + + /* adjust the pointers and counters */ + pB += dim_vec; + rowCnt--; + } + + /* left-over part of the rows */ + rowCnt = num_of_rows & 0x1; + + while (rowCnt) + { + uint16_t colCnt = dim_vec >> 2; + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + pA = vec_buffer; + + while (colCnt) + { + q31_t inV1, inV2, inM11, inM12; + + pB = read_and_pad_reordered(pB, &inM11, &inM12); + + inV1 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV1, inM11, sum); + + inV2 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV2, inM12, sum); + + colCnt--; + } + + /* left-over of the vector */ + colCnt = dim_vec & 0x3; + while (colCnt) + { + q7_t inV = *pA++; + q15_t inM = *pB++; + sum += inV * inM; + colCnt--; + } + + *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); + + rowCnt--; + } + +#else + int i, j; + + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + for (i = 0; i < num_of_rows; i++) + { + int ip_out = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); + for (j = 0; j < dim_vec; j++) + { + ip_out += pV[j] * pM[i * dim_vec + j]; + } + pOut[i] = (q7_t) __SSAT((ip_out >> out_shift), 8); + } + +#endif /* ARM_MATH_DSP */ + + /* Return to ARM_MATH_SUCCESS */ + return (ARM_MATH_SUCCESS); + +} + +/** + * @} end of FC group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7_opt.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7_opt.c new file mode 100755 index 00000000000..5ddf7caf366 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_q7_opt.c @@ -0,0 +1,484 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_q7_opt.c + * Description: Q7 basic fully-connected layer function + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + + /** + * @brief Q7 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_MATH_SUCCESS + * + * @details + * + * Buffer size: + * + * vec_buffer size: dim_vec + * + * This opt function is designed to work with interleaved weight + * matrix. The vector input is assumed in q7_t format, we call + * arm_q7_to_q15_no_shift_shuffle function to expand into + * q15_t format with certain weight re-ordering, refer to the function + * comments for more details. + * Here we use only one pointer to read 4 rows in the weight + * matrix. So if the original q7_t matrix looks like this: + * + * | a11 | a12 | a13 | a14 | a15 | a16 | a17 | + * + * | a21 | a22 | a23 | a24 | a25 | a26 | a27 | + * + * | a31 | a32 | a33 | a34 | a35 | a36 | a37 | + * + * | a41 | a42 | a43 | a44 | a45 | a46 | a47 | + * + * | a51 | a52 | a53 | a54 | a55 | a56 | a57 | + * + * | a61 | a62 | a63 | a64 | a65 | a66 | a67 | + * + * + * We operates on multiple-of-4 rows, so the first four rows becomes + * + * | a11 | a21 | a13 | a23 | a31 | a41 | a33 | a43 | + * + * | a12 | a22 | a14 | a24 | a32 | a42 | a34 | a44 | + * + * | a15 | a25 | a35 | a45 | a16 | a26 | a36 | a46 | + * + * So within the kernel, we first read the re-ordered vector in as: + * + * | b1 | b3 | and | b2 | b4 | + * + * the four q31_t weights will look like + * + * | a11 | a13 |, | a21 | a23 |, | a31 | a33 |, | a41 | a43 | + * + * | a12 | a14 |, | a22 | a24 |, | a32 | a34 |, | a42 | a44 | + * + * The column left over will be in-order. + * which is: + * + * | a17 | a27 | a37 | a47 | + * + * For the left-over rows, we do 1x1 computation, so the data remains + * as its original order. 
+ * + * So the stored weight matrix looks like this: + * + * | a11 | a21 | a13 | a23 | a31 | a41 | + * + * | a33 | a43 | a12 | a22 | a14 | a24 | + * + * | a32 | a42 | a34 | a44 | a15 | a25 | + * + * | a35 | a45 | a16 | a26 | a36 | a46 | + * + * | a17 | a27 | a37 | a47 | a51 | a52 | + * + * | a53 | a54 | a55 | a56 | a57 | a61 | + * + * | a62 | a63 | a64 | a65 | a66 | a67 | + * + * + */ + +arm_status +arm_fully_connected_q7_opt(const q7_t * pV, + const q7_t * pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t * bias, + q7_t * pOut, + q15_t * vec_buffer) +{ + +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + const q7_t *pB = pM; + q7_t *pO = pOut; + const q7_t *pBias = bias; + const q15_t *pA; + uint16_t rowCnt = num_of_rows >> 2; + + arm_q7_to_q15_reordered_no_shift(pV, vec_buffer, dim_vec); + + while (rowCnt) + { + + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + uint16_t colCnt = dim_vec >> 2; + + pA = vec_buffer; + +#ifdef USE_INTRINSIC + +#ifndef ARM_MATH_BIG_ENDIAN + while (colCnt) + { + q31_t inM11, inM12, inM13, inM14; + q31_t inV; + + inV = arm_nn_read_q15x2_ia(&pA); + inM11 = arm_nn_read_q7x4_ia(&pB); + inM12 = __SXTB16(__ROR(inM11, 8)); + inM11 = __SXTB16(inM11); + sum = __SMLAD(inM11, inV, sum); + sum2 = __SMLAD(inM12, inV, sum2); + inM13 = arm_nn_read_q7x4_ia(&pB); + inM14 = __SXTB16(__ROR(inM13, 8)); + inM13 = __SXTB16(inM13); + sum3 = __SMLAD(inM13, inV, sum3); + sum4 = __SMLAD(inM14, inV, sum4); + + inV = arm_nn_read_q15x2_ia(&pA); + inM11 = arm_nn_read_q7x4_ia(&pB); + inM12 = __SXTB16(__ROR(inM11, 8)); + inM11 = __SXTB16(inM11); + sum = __SMLAD(inM11, inV, sum); + sum2 = __SMLAD(inM12, inV, sum2); + inM13 = arm_nn_read_q7x4_ia(&pB); + inM14 = __SXTB16(__ROR(inM13, 8)); + inM13 = __SXTB16(inM13); + sum3 = __SMLAD(inM13, inV, sum3); + sum4 = __SMLAD(inM14, inV, sum4); + colCnt--; + } +#else + while (colCnt) + { + q31_t inM11, inM12, inM13, inM14; + q31_t inV; + + inV = arm_nn_read_q15x2_ia(&pA); + inM11 = arm_nn_read_q7x4_ia(&pB); + inM12 = __SXTB16(__ROR(inM11, 8)); + inM11 = __SXTB16(inM11); + sum = __SMLAD(inM12, inV, sum); + sum2 = __SMLAD(inM11, inV, sum2); + inM13 = arm_nn_read_q7x4_ia(&pB); + inM14 = __SXTB16(__ROR(inM13, 8)); + inM13 = __SXTB16(inM13); + sum3 = __SMLAD(inM14, inV, sum3); + sum4 = __SMLAD(inM13, inV, sum4); + + inV = arm_nn_read_q15x2_ia(&pA); + inM11 = arm_nn_read_q7x4_ia(&pB); + inM12 = __SXTB16(__ROR(inM11, 8)); + inM11 = __SXTB16(inM11); + sum = __SMLAD(inM12, inV, sum); + sum2 = __SMLAD(inM11, inV, sum2); + inM13 = arm_nn_read_q7x4_ia(&pB); + inM14 = __SXTB16(__ROR(inM13, 8)); + inM13 = __SXTB16(inM13); + sum3 = __SMLAD(inM14, inV, sum3); + sum4 = __SMLAD(inM13, inV, sum4); + colCnt--; + } +#endif /* ARM_MATH_BIG_ENDIAN */ + +#else + + /* + * register needed: + * loop counter: colCnt + * accumulators: sum, sum2, sum3, sum4 + * pointers: pB, pA + * weight data: inM11, inM12, inM13, inM14 + * activation data: inV + */ + +#ifndef ARM_MATH_BIG_ENDIAN + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #8\n" + "ldr.w r1, [%[pB]], #16\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r1, %[sum]\n" + "smlad %[sum2], r4, r0, %[sum2]\n" + "ldr.w r3, [%[pB], 
#-12]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r3, %[sum3]\n" + "smlad %[sum4], r4, r2, %[sum4]\n" + "ldr.w r4, [%[pA], #-4]\n" + "ldr.w r1, [%[pB], #-8]\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r1, %[sum]\n" + "smlad %[sum2], r4, r0, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r3, %[sum3]\n" + "smlad %[sum4], r4, r2, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] "+r"(sum), + [sum2] "+r"(sum2),[sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); +#else + asm volatile ("COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #8\n" + "ldr.w r1, [%[pB]], #16\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r0, %[sum]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r3, [%[pB], #-12]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "ldr.w r4, [%[pA], #-4]\n" + "ldr.w r1, [%[pB], #-8]\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r0, %[sum]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n":[sum] "+r"(sum), + [sum2] "+r"(sum2),[sum3] "+r"(sum3), + [sum4] "+r"(sum4),[pB] "+r"(pB),[pA] "+r"(pA):[colCnt] "r"(colCnt):"r0", "r1", "r2", "r3", "r4"); +#endif /* ARM_MATH_BIG_ENDIAN */ + +#endif /* USE_INTRINSIC */ + + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q7_t inM = *pB++; + q7_t inM2 = *pB++; + q7_t inM3 = *pB++; + q7_t inM4 = *pB++; + + sum += inV * inM; + sum2 += inV * inM2; + sum3 += inV * inM3; + sum4 += inV * inM4; + colCnt--; + } /* while over colCnt */ + *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); + *pO++ = (q7_t) (__SSAT((sum2 >> out_shift), 8)); + *pO++ = (q7_t) (__SSAT((sum3 >> out_shift), 8)); + *pO++ = (q7_t) (__SSAT((sum4 >> out_shift), 8)); + + /* adjust the pointers and counters */ + rowCnt--; + } + + /* left-over part of the rows */ + rowCnt = num_of_rows & 0x3; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + uint16_t colCnt = dim_vec >> 2; + + pA = vec_buffer; + + while (colCnt) + { + q31_t inV1, inV2, inM11, inM12; + + pB = read_and_pad_reordered(pB, &inM11, &inM12); + + inV1 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV1, inM11, sum); + + inV2 = arm_nn_read_q15x2_ia(&pA); + sum = __SMLAD(inV2, inM12, sum); + + colCnt--; + } + + /* left-over of the vector */ + colCnt = dim_vec & 0x3; + while (colCnt) + { + q15_t inV = *pA++; + q7_t inM = *pB++; + sum += inV * inM; + colCnt--; + } + + *pO++ = (q7_t) (__SSAT((sum >> out_shift), 8)); + + rowCnt--; + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + uint16_t rowCnt = num_of_rows >> 2; + const q7_t *pB = pM; + const q7_t *pA; + q7_t *pO = pOut; + const q7_t *pBias = bias; + + while (rowCnt) + { + q31_t sum = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum2 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum3 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + q31_t sum4 = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); 
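+        /* Reference (non-DSP) path: the weights are still stored in the
+         * interleaved order documented above, so the activations below are
+         * read in their natural order but labelled inA1, inA3, inA2, inA4;
+         * each product then lines up with the weight slot it occupies in
+         * the interleaved layout. The accumulators were seeded with the
+         * shifted bias plus NN_ROUND(out_shift) so that the final
+         * right shift by out_shift rounds to nearest instead of truncating. */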
+ + uint16_t colCnt = dim_vec >> 2; + + pA = pV; + + while (colCnt) + { + q7_t inA1 = *pA++; + q7_t inA3 = *pA++; + q7_t inA2 = *pA++; + q7_t inA4 = *pA++; + + q7_t inB1 = *pB++; + q7_t inB3 = *pB++; + q7_t inB2 = *pB++; + q7_t inB4 = *pB++; + + sum += inA1 * inB1 + inA2 * inB2; + sum2 += inA1 * inB3 + inA2 * inB4; + + inB1 = *pB++; + inB3 = *pB++; + inB2 = *pB++; + inB4 = *pB++; + + sum3 += inA1 * inB1 + inA2 * inB2; + sum4 += inA1 * inB3 + inA2 * inB4; + + inB1 = *pB++; + inB3 = *pB++; + inB2 = *pB++; + inB4 = *pB++; + + sum += inA3 * inB1 + inA4 * inB2; + sum2 += inA3 * inB3 + inA4 * inB4; + + inB1 = *pB++; + inB3 = *pB++; + inB2 = *pB++; + inB4 = *pB++; + + sum3 += inA3 * inB1 + inA4 * inB2; + sum4 += inA3 * inB3 + inA4 * inB4; + + colCnt--; + } + colCnt = dim_vec & 0x3; + while (colCnt) + { + q7_t inA = *pA++; + q7_t inB = *pB++; + sum += inA * inB; + inB = *pB++; + sum2 += inA * inB; + inB = *pB++; + sum3 += inA * inB; + inB = *pB++; + sum4 += inA * inB; + + colCnt--; + } + *pO++ = (q7_t) __SSAT((sum >> out_shift), 8); + *pO++ = (q7_t) __SSAT((sum2 >> out_shift), 8); + *pO++ = (q7_t) __SSAT((sum3 >> out_shift), 8); + *pO++ = (q7_t) __SSAT((sum4 >> out_shift), 8); + + rowCnt--; + } + + rowCnt = num_of_rows & 0x3; + + while (rowCnt) + { + int ip_out = ((q31_t)(*pBias++) << bias_shift) + NN_ROUND(out_shift); + + int j; + + pA = pV; + for (j = 0; j < dim_vec; j++) + { + q7_t inA = *pA++; + q7_t inB = *pB++; + ip_out += inA * inB; + } + *pO++ = (q7_t) __SSAT((ip_out >> out_shift), 8); + + rowCnt--; + } + +#endif /* ARM_MATH_DSP */ + + /* Return to ARM_MATH_SUCCESS */ + return (ARM_MATH_SUCCESS); + +} + +/** + * @} end of FC group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_s8.c new file mode 100755 index 00000000000..730fdf8940f --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions/arm_fully_connected_s8.c @@ -0,0 +1,289 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_s8 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: April 1, 2020 + * $Revision: V.1.5.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S8 basic fully-connected and matrix multiplication layer function for TensorFlow Lite + * + * Refer header file for details. 
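+ *
+ * Illustrative scalar model of one batch (REQUANTIZE and CLAMP stand in
+ * for the arm_nn requantization helpers and the MIN/MAX clamping used in
+ * the code below):
+ *
+ *   for (r = 0; r < row_dim; r++) {
+ *     int32_t acc = bias[r];
+ *     for (c = 0; c < col_dim; c++)
+ *       acc += (input[c] + input_offset) * (kernel[r * col_dim + c] + filter_offset);
+ *     acc = REQUANTIZE(acc, out_mult, out_shift) + output_offset;
+ *     output[r] = (int8_t)CLAMP(acc, output_activation_min, output_activation_max);
+ *   }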
+ * + */ + +#if defined(ARM_MATH_MVEI) + +arm_status +arm_fully_connected_s8(const int8_t *input, + const int8_t *kernel, + const uint16_t col_dim, + const uint16_t row_dim, + const uint16_t nb_batches, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t output_offset, + const int32_t *bias, + int8_t *output, + const int32_t output_activation_min, + const int32_t output_activation_max, + q15_t *vec_buffer) +{ + (void)vec_buffer; + const int8_t *input_a; + const int32_t *bias_tmp = bias; + const int8_t *weight_tmp = kernel; + int32_t batch_count = nb_batches; + + const int16x8_t filter_offset_vec = vdupq_n_s16((int16_t)filter_offset); + const int16x8_t input_offset_vec = vdupq_n_s16((int16_t)input_offset); + + while (batch_count) + { + bias_tmp = bias; + weight_tmp = kernel; + + int cnt; + cnt = row_dim >> 2; + + for (int out_c = 0; out_c < cnt; out_c++) + { + int32_t acc1 = *bias_tmp++; + int32_t acc2 = *bias_tmp++; + int32_t acc3 = *bias_tmp++; + int32_t acc4 = *bias_tmp++; + input_a = input; + + int16x8_t input_val, filter_val; + int16x8_t tmp_a1, tmp_a2, tmp_a3, tmp_a4, tmp_b; + int32x4_t acc; + int32_t block_count; + + const int8_t *col = input_a; + const int8_t *row_0 = weight_tmp; + const int8_t *row_1 = weight_tmp + col_dim; + const int8_t *row_2 = weight_tmp + 2 * col_dim; + const int8_t *row_3 = weight_tmp + 3 * col_dim; + + block_count = col_dim >> 3U; + + while (block_count > 0U) + { + input_val = vldrbq_s16(col); + tmp_b = vaddq_s16(input_val, input_offset_vec); + + filter_val = vldrbq_s16(row_0); + tmp_a1 = vaddq_s16(filter_val, filter_offset_vec); + acc1 = vmladavaq_s16(acc1, tmp_a1, tmp_b); + + filter_val = vldrbq_s16(row_1); + tmp_a2 = vaddq_s16(filter_val, filter_offset_vec); + acc2 = vmladavaq_s16(acc2, tmp_a2, tmp_b); + + filter_val = vldrbq_s16(row_2); + tmp_a3 = vaddq_s16(filter_val, filter_offset_vec); + acc3 = vmladavaq_s16(acc3, tmp_a3, tmp_b); + + filter_val = vldrbq_s16(row_3); + tmp_a4 = vaddq_s16(filter_val, filter_offset_vec); + acc4 = vmladavaq_s16(acc4, tmp_a4, tmp_b); + + col += 8; + row_0 += 8; + row_1 += 8; + row_2 += 8; + row_3 += 8; + block_count--; + } + + block_count = col_dim & 7; + + while (block_count > 0U) + { + q15_t col_ip = *col++; + + q7_t in_m1 = *row_0++; + q7_t in_m2 = *row_1++; + q7_t in_m3 = *row_2++; + q7_t in_m4 = *row_3++; + + acc1 += (col_ip + input_offset) * (in_m1 + filter_offset); + acc2 += (col_ip + input_offset) * (in_m2 + filter_offset); + acc3 += (col_ip + input_offset) * (in_m3 + filter_offset); + acc4 += (col_ip + input_offset) * (in_m4 + filter_offset); + + block_count--; + } + + input_a = input + col_dim; + weight_tmp += 4 * col_dim; + + acc[0] = acc1; + acc[1] = acc2; + acc[2] = acc3; + acc[3] = acc4; + + acc = arm_requantize_mve(acc, out_mult, out_shift); + acc = vaddq_s32(acc, vdupq_n_s32(output_offset)); + acc = vmaxq_s32(acc, vdupq_n_s32(output_activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(output_activation_max)); + + vstrbq_s32(output, acc); + + output += 4; + } + + cnt = row_dim & 3; + for (int out_c = 0; out_c < cnt; out_c++) + { + + int32_t acc = *bias_tmp++; + input_a = input; + + int16x8_t input_val, filter_val; + int16x8_t tmp_a, tmp_b; + int32_t block_count; + + const int8_t *col = input_a; + const int8_t *kernel_cur = weight_tmp; + + block_count = col_dim >> 3U; + + while (block_count > 0U) + { + + input_val = vldrbq_s16(col); + filter_val = vldrbq_s16(kernel_cur); + + tmp_a = vaddq_s16(filter_val, filter_offset_vec); + tmp_b = 
vaddq_s16(input_val, input_offset_vec); + + acc = vmladavaq_s16(acc, tmp_a, tmp_b); + + col += 8; + kernel_cur += 8; + block_count--; + } + + block_count = col_dim & 7; + + while (block_count > 0U) + { + q15_t col_ip = *col++; + q7_t in_m = *kernel_cur++; + + acc += (col_ip + input_offset) * (in_m + filter_offset); + + block_count--; + } + + input_a += col_dim; + weight_tmp += col_dim; + + acc = arm_nn_sat_doubling_high_mult(acc * (1 << LEFT_SHIFT(out_shift)), out_mult); + acc = arm_nn_divide_by_power_of_two(acc, RIGHT_SHIFT(out_shift)); + + acc += output_offset; + + acc = MAX(acc, output_activation_min); + acc = MIN(acc, output_activation_max); + + *output++ = (int8_t)(acc); + } + input += col_dim; + batch_count--; + } + return (ARM_MATH_SUCCESS); +} + +#else +arm_status +arm_fully_connected_s8(const int8_t *input, + const int8_t *kernel, + const uint16_t col_dim, + const uint16_t row_dim, + const uint16_t nb_batches, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t output_offset, + const int32_t *bias, + int8_t *output, + const int32_t output_activation_min, + const int32_t output_activation_max, + q15_t *vec_buffer) +{ + (void)vec_buffer; + + uint16_t batch_cnt = nb_batches; + + while (batch_cnt) + { + arm_nn_vec_mat_mult_t_s8(input, + kernel, + bias, + output, + input_offset, + filter_offset, + output_offset, + out_mult, + out_shift, + col_dim, + row_dim, + output_activation_min, + output_activation_max); + input += col_dim; + output += row_dim; + batch_cnt--; + } + return (ARM_MATH_SUCCESS); +} +#endif /* ARM_MATH_HELIUM */ + +int32_t arm_fully_connected_s8_get_buffer_size(const uint16_t col_dim) +{ + (void)col_dim; + return 0; +} + +/** + * @} end of FC group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/CMakeLists.txt new file mode 100755 index 00000000000..56bce6862ba --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/CMakeLists.txt @@ -0,0 +1,19 @@ +cmake_minimum_required (VERSION 3.6) + +project(CMSISNNSupport) + +include(configLib) + +file(GLOB SRC "./*_*.c") + +add_library(CMSISNNSupport STATIC ${SRC}) + +configLib(CMSISNNSupport ${ROOT}) +configDsp(CMSISNNSupport ${ROOT}) + +### Includes +target_include_directories(CMSISNNSupport PUBLIC "${NN}/Include") +target_include_directories(CMSISNNSupport PUBLIC "${ROOT}/CMSIS/DSP/Include") + + + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c new file mode 100755 index 00000000000..ed4ef739097 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_accumulate_q7_to_q15.c + * Description: Accumulate q7 vector into q15 one. + * + * $Date: July 2019 + * $Revision: V.1.0.0 + * + * pSrc Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +void arm_nn_accumulate_q7_to_q15(q15_t *pDst, const q7_t *pSrc, uint32_t length) +{ + q15_t *pCnt = pDst; + const q7_t *pV = pSrc; + q31_t v1, v2, vo1, vo2; + int32_t cnt = length >> 2; + q31_t in; + + while (cnt > 0l) + { + q31_t value = arm_nn_read_q7x4_ia(&pV); + v1 = __SXTB16(__ROR(value, 8)); + v2 = __SXTB16(value); +#ifndef ARM_MATH_BIG_ENDIAN + + vo2 = __PKHTB(v1, v2, 16); + vo1 = __PKHBT(v2, v1, 16); + +#else + + vo1 = __PKHTB(v1, v2, 16); + vo2 = __PKHBT(v2, v1, 16); + +#endif + + in = arm_nn_read_q15x2(pCnt); + write_q15x2_ia(&pCnt, __QADD16(vo1, in)); + + in = arm_nn_read_q15x2(pCnt); + write_q15x2_ia(&pCnt, __QADD16(vo2, in)); + + cnt--; + } + cnt = length & 0x3; + while (cnt > 0l) + { + *pCnt++ += *pV++; + cnt--; + } +} + +/** + * @} end of NNBasicMath group + */ \ No newline at end of file diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_add_q7.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_add_q7.c new file mode 100755 index 00000000000..f860415b665 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_add_q7.c @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_add_q7.c + * Description: Non saturating addition of elements of a q7 vector. 
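+ *                The q31_t sum of all block_size q7 inputs is written to
+ *                *output. On DSP builds four bytes are folded per step,
+ *                roughly:
+ *                  pair = __SXTAB16(__SXTB16(x), __ROR(x, 8));  b0+b1 | b2+b3
+ *                  sum  = __SMLAD(pair, 0x00010001, sum);       adds both halves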
+ * + * $Date: July 2019 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size) +{ + uint32_t block_count; + q31_t result = 0; +#if defined(ARM_MATH_DSP) + /* Loop unrolling: Compute 4 outputs at a time */ + block_count = block_size >> 2U; + + while (block_count > 0U) + { + const int32_t mult_q15x2 = (1UL << 16) | 1UL; + q31_t in_q7x4 = arm_nn_read_q7x4_ia(&input); + q31_t temp_q15x2 = __SXTAB16(__SXTB16(in_q7x4), __ROR(in_q7x4, 8)); + + result = __SMLAD(temp_q15x2, mult_q15x2, result); + + /* Decrement loop counter */ + block_count--; + } + + /* Loop unrolling: Compute remaining outputs */ + block_count = block_size & 0x3; +#else + block_count = block_size; +#endif + while (block_count > 0U) + { + /* Add and store result in destination buffer. */ + result += *input++; + + /* Decrement loop counter */ + block_count--; + } + + *output = result; +} + +/** + * @} end of NNBasicMath group + */ \ No newline at end of file diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c new file mode 100755 index 00000000000..3ecd8e526e6 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_padded_s8.c + * Description: Depthwise convolution with padded matrices. + * + * $Date: March 17, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M processors with MVE extension + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * Depthwise convolution of transposed rhs matrix with 4 lhs matrices. One or more of the rhs matrices are padded. + * Dimensions are the same for lhs and rhs. + * + * Refer header file for details. 
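+ *
+ * Processing sketch (MVE build): each outer iteration covers four
+ * consecutive channels for four adjacent output positions (the four lhs
+ * rows), input_offset is added to every input element before the multiply
+ * (the non-padded variant further down instead folds it in through a
+ * kernel-sum term), and the stores are predicated with vctp32q so the
+ * channel tail (num_ch % 4) never writes past the output buffer.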
+ * + */ + +q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t input_offset, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out) +{ +#if defined(ARM_MATH_MVEI) + int32_t loop_count = (num_ch + 3) / 4; + const int32_t *bias = output_bias; + uint32_t num_ch_to_process = num_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; + num_ch_to_process -= 4, out += 4, offset += 4, i_loop_cnt++) + { + int32x4_t out_0 = vldrwq_s32(bias); + int32x4_t out_1 = out_0; + int32x4_t out_2 = out_0; + int32x4_t out_3 = out_0; + bias += 4; + + const int8_t *rhs_0 = rhs + offset; + const int8_t *lhs_0 = lhs + offset; + const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset; + const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; + const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; + + for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) + { + const int32x4_t ker_0 = vldrbq_s32(rhs_0); + + int32x4_t ip_0 = vldrbq_s32(lhs_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); + + int32x4_t ip_1 = vldrbq_s32(lhs_1); + ip_1 = vaddq_n_s32(ip_1, input_offset); + out_1 += vmulq_s32(ip_1, ker_0); + + int32x4_t ip_2 = vldrbq_s32(lhs_2); + ip_2 = vaddq_n_s32(ip_2, input_offset); + out_2 += vmulq_s32(ip_2, ker_0); + + int32x4_t ip_3 = vldrbq_s32(lhs_3); + ip_3 = vaddq_n_s32(ip_3, input_offset); + + out_3 += vmulq_s32(ip_3, ker_0); + + lhs_0 += num_ch; + lhs_1 += num_ch; + lhs_2 += num_ch; + lhs_3 += num_ch; + + rhs_0 += num_ch; + } + + const int32x4_t mult = vldrwq_s32(out_mult); + const int32x4_t shift = vldrwq_s32(out_shift); + out_mult += 4; + out_shift += 4; + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, out_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); + mve_pred16_t p = vctp32q(num_ch_to_process); + vstrbq_p_s32(out, out_0, p); + + out_1 = arm_requantize_mve_32x4(out_1, mult, shift); + out_1 = vaddq_n_s32(out_1, out_offset); + out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); + out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + num_ch, out_1, p); + + out_2 = arm_requantize_mve_32x4(out_2, mult, shift); + out_2 = vaddq_n_s32(out_2, out_offset); + out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); + out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 2 * num_ch, out_2, p); + + out_3 = arm_requantize_mve_32x4(out_3, mult, shift); + out_3 = vaddq_n_s32(out_3, out_offset); + out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); + out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 3 * num_ch, out_3, p); + } + + const int tail_ch = num_ch & 0x3; + if (tail_ch != 0) + { + out -= (4 - tail_ch); + } + return out + (3 * num_ch); + +#else + (void)lhs; + (void)rhs; + (void)input_offset; + (void)num_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)row_x_col; + (void)output_bias; + (void)out; + return NULL; +#endif +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c new file mode 100755 index 00000000000..1bff9c4eae9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c @@ -0,0 +1,171 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_s8.c + * Description: Depthwise convolution on matrices with no padding. + * + * $Date: March 17, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M processors with MVE extension. + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding. Dimensions are the same for lhs and rhs. + * + * Refer header file for details. + * + */ + +q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t input_offset, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out) +{ +#if defined(ARM_MATH_MVEI) + const int32_t *bias = output_bias; + int32_t loop_count = (num_ch + 3) / 4; + uint32_t num_ch_to_process = num_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++) + { + int32x4_t out_0 = vldrwq_s32(bias); + int32x4_t out_1 = out_0; + int32x4_t out_2 = out_0; + int32x4_t out_3 = out_0; + bias += 4; + + const int8_t *rhs_0 = rhs + offset; + const int8_t *lhs_0 = lhs + offset; + const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset; + const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; + const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; + int32x4_t ker_sum = vdupq_n_s32(0); + + for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) + { + const int32x4_t ker_0 = vldrbq_s32(rhs_0); + ker_sum = vaddq_s32(ker_sum, ker_0); + + int32x4_t ip_0 = vldrbq_s32(lhs_0); + out_0 += vmulq_s32(ip_0, ker_0); + + int32x4_t ip_1 = vldrbq_s32(lhs_1); + out_1 += vmulq_s32(ip_1, ker_0); + + int32x4_t ip_2 = vldrbq_s32(lhs_2); + out_2 += vmulq_s32(ip_2, ker_0); + + int32x4_t ip_3 = vldrbq_s32(lhs_3); + out_3 += vmulq_s32(ip_3, ker_0); + + lhs_0 += num_ch; + lhs_1 += num_ch; + lhs_2 += num_ch; + lhs_3 += num_ch; + + rhs_0 += num_ch; + } + + ker_sum = vmulq_n_s32(ker_sum, input_offset); + out_0 = ker_sum + out_0; + out_1 = ker_sum + out_1; + out_2 = ker_sum + out_2; + out_3 = 
ker_sum + out_3; + + const int32x4_t mult = vldrwq_s32(out_mult); + const int32x4_t shift = vldrwq_s32(out_shift); + out_mult += 4; + out_shift += 4; + mve_pred16_t p = vctp32q(num_ch_to_process); + + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, out_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out, out_0, p); + + out_1 = arm_requantize_mve_32x4(out_1, mult, shift); + out_1 = vaddq_n_s32(out_1, out_offset); + out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); + out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + num_ch, out_1, p); + + out_2 = arm_requantize_mve_32x4(out_2, mult, shift); + out_2 = vaddq_n_s32(out_2, out_offset); + out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); + out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 2 * num_ch, out_2, p); + + out_3 = arm_requantize_mve_32x4(out_3, mult, shift); + out_3 = vaddq_n_s32(out_3, out_offset); + out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); + out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); + vstrbq_p_s32(out + 3 * num_ch, out_3, p); + } + + const int tail_ch = num_ch & 0x3; + if (tail_ch != 0) + { + out -= (4 - tail_ch); + } + + return out + (3 * num_ch); +#else + (void)lhs; + (void)rhs; + (void)input_offset; + (void)num_ch; + (void)out_shift; + (void)out_mult; + (void)out_offset; + (void)activation_min; + (void)activation_max; + (void)row_x_col; + (void)output_bias; + (void)out; + return NULL; +#endif +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c new file mode 100755 index 00000000000..6077658696c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mul_core_1x_s8.c + * Description: General Matrix-multiplication function + * + * $Date: January 20, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * s8 matrix multiplication to process 1 row + * + * Refer header file for details. 
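+ *
+ * In scalar terms a single pass computes
+ *
+ *   *output  = sum_i row_base[i] * col_base[i];
+ *   *sum_col = sum_i col_base[i];
+ *
+ * so the caller can fold offset and bias contributions in afterwards.
+ * The MVE build expresses this as a tail-predicated wlstp/letp loop; the
+ * plain C loop below is the fallback.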
+ * + */ + +arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, + const int8_t *row_base, + const int8_t *col_base, + int32_t *const sum_col, + int32_t *const output) +{ + int32_t acc_n0 = 0; + int32_t sum_tmp = 0; + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + + __asm volatile ( + " vldrb.8 q0, [%[col]], 16 \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], 16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q0, [%[col]], 16 \n" + " letp lr, 2b \n" + "1: \n" + :[col] "+r"(col_base) + ,[sum] "+Te"(sum_tmp) + ,[row0] "+r"(row_base) + ,[out0] "+Te"(acc_n0) + :[cnt] "r"(row_elements) + :"q0","q1", "memory", "r14"); +#else + for (int i = 0; i < row_elements; i++) + { + sum_tmp += col_base[i]; + acc_n0 += row_base[i] * col_base[i]; + } +#endif + + *sum_col = sum_tmp; + *output = acc_n0; + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c new file mode 100755 index 00000000000..39ad5291318 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mul_core_4x_s8.c + * Description: General matrix multiplication function for MVE extension + * + * $Date: January 20, 2020 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * s8 matrix multiplication to process 4 rows and one column + * + * Refer header file for details. 
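+ *
+ * Same contract as the 1x variant above, except that four row pointers
+ * spaced by `offset` share every column load: output[0..3] receive the
+ * four dot products and *sum_col the column sum, computed once because
+ * the column data is common to all four rows.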
+ * + */ +arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, + const int32_t offset, + const int8_t *row_base, + const int8_t *col_base, + int32_t *const sum_col, + int32_t *const output) +{ + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; + + const int8_t *ip_row_0 = row_base; + const int8_t *ip_row_1 = row_base + offset; + const int8_t *ip_row_2 = row_base + (2 * offset); + const int8_t *ip_row_3 = row_base + (3 * offset); + int32_t sum_tmp = 0; + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + __asm volatile( + " vldrb.8 q0, [%[col]], 16 \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], 16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q2, [%[row1]], 16 \n" + " vmladava.s8 %[out1], q0, q2 \n" + " vldrb.8 q3, [%[row2]], 16 \n" + " vmladava.s8 %[out2], q0, q3 \n" + " vldrb.8 q4, [%[row3]], 16 \n" + " vmladava.s8 %[out3], q0, q4 \n" + " vldrb.8 q0, [%[col]], 16 \n" + " letp lr, 2b \n" + "1: \n" + :[col] "+r"(col_base) + ,[sum] "+Te"(sum_tmp) + ,[row0] "+r"(ip_row_0) + ,[row1] "+r"(ip_row_1) + ,[row2] "+r"(ip_row_2) + ,[row3] "+r"(ip_row_3) + ,[out0] "+Te"(acc_n0) + ,[out1] "+Te"(acc_n1) + ,[out2] "+Te"(acc_n2) + ,[out3] "+Te"(acc_n3) + : [cnt] "r"(row_elements) + : "q0", "q1", "q2", "q3", "q4", "memory", "r14"); +#else + for (int i = 0; i < row_elements; i++) + { + int32_t col = col_base[i]; + sum_tmp += col; + acc_n0 += ip_row_0[i] * col; + acc_n1 += ip_row_1[i] * col; + acc_n2 += ip_row_2[i] * col; + acc_n3 += ip_row_3[i] * col; + } +#endif + output[0] = acc_n0; + output[1] = acc_n1; + output[2] = acc_n2; + output[3] = acc_n3; + + *sum_col = sum_tmp; + + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c new file mode 100755 index 00000000000..4f8006fdeb7 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c @@ -0,0 +1,572 @@ +/* + * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_s8_nt_t_s8 + * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed + * + * $Date: March 17 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * s8 matrix multiplication with the right-hand-side matrix transposed + * + * Refer header file for details. + * + */ +arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max) +{ +#if defined(ARM_MATH_DSP) + const int32_t off0 = rhs_cols - 4; + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + { + const q7_t *lhs_ptr = &lhs[0]; + q7_t *dst_ptr = &dst[0]; + + q31_t lhs_offset_contribution0 = 0; + q31_t lhs_offset_contribution1 = 0; + + for (int32_t x = 0; x < rhs_cols; ++x) + { + lhs_offset_contribution0 += rhs[x]; + lhs_offset_contribution1 += rhs[x + rhs_cols]; + } + + lhs_offset_contribution0 *= lhs_offset; + lhs_offset_contribution1 *= lhs_offset; + + lhs_offset_contribution0 += bias[rhs_rows_idx]; + lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = lhs_offset_contribution0; + q31_t res01 = lhs_offset_contribution1; + q31_t res10 = lhs_offset_contribution0; + q31_t res11 = lhs_offset_contribution1; + + int32_t rhs_cols_idx = 0; + + q31_t val0, val1, val2, val3, val4, val5; + + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val2 = __SXTB16(val1); + val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val1 = __SXTB16_RORn(val1, 8); + val0 = __SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTB16(val4); + res00 = __SMLAD(val0, val1, res00); + val4 = __SXTB16_RORn(val4, 8); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); + val3 = __SXTB16(val0); + val0 = __SXTB16_RORn(val0, 8); + res10 = __SMLAD(val3, val2, res10); + res11 = __SMLAD(val3, val5, res11); + res10 = __SMLAD(val0, val1, res10); + val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + res11 = __SMLAD(val0, val4, res11); + + val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = __SXTB16(val1); + val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val1 = __SXTB16_RORn(val1, 8); + val0 = __SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTB16(val4); + res00 = __SMLAD(val0, val1, res00); + val4 = __SXTB16_RORn(val4, 8); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_q7x4((const q7_t 
*)&lhs_ptr[off0]); + val3 = __SXTB16(val0); + val0 = __SXTB16_RORn(val0, 8); + res10 = __SMLAD(val3, val2, res10); + res11 = __SMLAD(val3, val5, res11); + res10 = __SMLAD(val0, val1, res10); + val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + res11 = __SMLAD(val0, val4, res11); + + val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = __SXTB16(val1); + val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val1 = __SXTB16_RORn(val1, 8); + val0 = __SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTB16(val4); + res00 = __SMLAD(val0, val1, res00); + val4 = __SXTB16_RORn(val4, 8); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); + val3 = __SXTB16(val0); + val0 = __SXTB16_RORn(val0, 8); + res10 = __SMLAD(val3, val2, res10); + res11 = __SMLAD(val3, val5, res11); + res10 = __SMLAD(val0, val1, res10); + val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + res11 = __SMLAD(val0, val4, res11); + + val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = __SXTB16(val1); + val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val1 = __SXTB16_RORn(val1, 8); + val0 = __SXTB16_RORn(val0, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTB16(val4); + res00 = __SMLAD(val0, val1, res00); + val4 = __SXTB16_RORn(val4, 8); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val0, val4, res01); + + // 4 x MAC res10, res11 + val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]); + val3 = __SXTB16(val0); + val0 = __SXTB16_RORn(val0, 8); + res10 = __SMLAD(val3, val2, res10); + res11 = __SMLAD(val3, val5, res11); + res10 = __SMLAD(val0, val1, res10); + res11 = __SMLAD(val0, val4, res11); + } + + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q7_t rhs_value0 = rhs_ptr[0]; + q7_t rhs_value1 = rhs_ptr[rhs_cols]; + q7_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + lhs_value = lhs_ptr[rhs_cols]; + res10 += lhs_value * rhs_value0; + res11 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (q7_t)res00; + dst_ptr[1] = (q7_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (q7_t)res10; + dst_ptr[1] = (q7_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr += rhs_cols; + + lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = lhs_offset_contribution0; + q31_t res01 = lhs_offset_contribution1; + + int32_t rhs_cols_idx = 0; + + q31_t val0, val1, val2, val3, val4, val5; + for (; rhs_cols_idx 
<= (rhs_cols - 16); rhs_cols_idx += 16) + { + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val5 = __SXTB16(val2); + val4 = __SXTB16(val1); + val0 = __SXTB16_RORn(val0, 8); + val2 = __SXTB16_RORn(val2, 8); + val1 = __SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val5, val3, res00); + res00 = __SMLAD(val2, val0, res00); + res01 = __SMLAD(val5, val4, res01); + res01 = __SMLAD(val2, val1, res01); + + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val5 = __SXTB16(val2); + val4 = __SXTB16(val1); + val0 = __SXTB16_RORn(val0, 8); + val2 = __SXTB16_RORn(val2, 8); + val1 = __SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val5, val3, res00); + res00 = __SMLAD(val2, val0, res00); + res01 = __SMLAD(val5, val4, res01); + res01 = __SMLAD(val2, val1, res01); + + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val5 = __SXTB16(val2); + val4 = __SXTB16(val1); + val0 = __SXTB16_RORn(val0, 8); + val2 = __SXTB16_RORn(val2, 8); + val1 = __SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val5, val3, res00); + res00 = __SMLAD(val2, val0, res00); + res01 = __SMLAD(val5, val4, res01); + res01 = __SMLAD(val2, val1, res01); + + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val3 = __SXTB16(val0); + val5 = __SXTB16(val2); + val4 = __SXTB16(val1); + val0 = __SXTB16_RORn(val0, 8); + val2 = __SXTB16_RORn(val2, 8); + val1 = __SXTB16_RORn(val1, 8); + + // 4 x MAC res00, res01 + res00 = __SMLAD(val5, val3, res00); + res00 = __SMLAD(val2, val0, res00); + res01 = __SMLAD(val5, val4, res01); + res01 = __SMLAD(val2, val1, res01); + } + + // Left-over accumulations + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q7_t rhs_value0 = rhs_ptr[0]; + q7_t rhs_value1 = rhs_ptr[rhs_cols]; + q7_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (q7_t)res00; + dst_ptr[1] = (q7_t)res01; + } + + rhs += 2 * rhs_cols; + dst += 2; + } + + if (rhs_rows % 2) + { + const q7_t *lhs_ptr = &lhs[0]; + q7_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) + { + const q7_t *rhs_ptr = &rhs[0]; + q31_t res00 = bias[rhs_rows - 1]; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value = rhs_ptr[0]; + q31_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows 
- 1]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (q7_t)res00; + dst_ptr += rhs_rows; + } + } +#else + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + { + const q7_t *lhs_ptr = &lhs[0]; + q7_t *dst_ptr = &dst[0]; + + q31_t lhs_offset_contribution0 = 0; + q31_t lhs_offset_contribution1 = 0; + + for (int32_t x = 0; x < rhs_cols; ++x) + { + lhs_offset_contribution0 += rhs[x]; + lhs_offset_contribution1 += rhs[x + rhs_cols]; + } + + lhs_offset_contribution0 *= lhs_offset; + lhs_offset_contribution1 *= lhs_offset; + + lhs_offset_contribution0 += bias[rhs_rows_idx]; + lhs_offset_contribution1 += bias[rhs_rows_idx + 1]; + + int32_t lhs_rows_idx = lhs_rows >> 1; + + while (lhs_rows_idx) + { + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = lhs_offset_contribution0; + q31_t res01 = lhs_offset_contribution1; + q31_t res10 = lhs_offset_contribution0; + q31_t res11 = lhs_offset_contribution1; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q7_t rhs_value0 = rhs_ptr[0]; + q7_t rhs_value1 = rhs_ptr[rhs_cols]; + q7_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + lhs_value = lhs_ptr[rhs_cols]; + res10 += lhs_value * rhs_value0; + res11 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + res10 += dst_offset; + res11 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res10 = MAX(res10, activation_min); + res10 = MIN(res10, activation_max); + res11 = MAX(res11, activation_min); + res11 = MIN(res11, activation_max); + + dst_ptr[0] = (q7_t)res00; + dst_ptr[1] = (q7_t)res01; + dst_ptr += rhs_rows; + dst_ptr[0] = (q7_t)res10; + dst_ptr[1] = (q7_t)res11; + dst_ptr += rhs_rows; + + lhs_ptr += rhs_cols; + + lhs_rows_idx--; + } + + // Left-over rows + if (lhs_rows % 2) + { + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = lhs_offset_contribution0; + q31_t res01 = lhs_offset_contribution1; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q7_t rhs_value0 = rhs_ptr[0]; + q7_t rhs_value1 = rhs_ptr[rhs_cols]; + q7_t lhs_value = lhs_ptr[0]; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]); + res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + dst_ptr[0] = (q7_t)res00; + dst_ptr[1] = (q7_t)res01; + } + + rhs += 2 * rhs_cols; + dst += 2; + } + + if (rhs_rows % 2) + { + const q7_t *lhs_ptr = 
&lhs[0]; + q7_t *dst_ptr = &dst[0]; + + for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx) + { + const q7_t *rhs_ptr = &rhs[0]; + q31_t res00 = bias[rhs_rows - 1]; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value = rhs_ptr[0]; + q31_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + dst_ptr[0] = (q7_t)res00; + dst_ptr += rhs_rows; + } + } +#endif + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q15.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q15.c new file mode 100755 index 00000000000..69b0ece753b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q15.c @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mult_q15.c + * Description: Q15 vector multiplication with variable output shifts + * + * $Date: 13. July 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + + +/** + * @brief Q7 vector multiplication with variable output shifts + * @param[in] *pSrcA pointer to the first input vector + * @param[in] *pSrcB pointer to the second input vector + * @param[out] *pDst pointer to the output vector + * @param[in] out_shift amount of right-shift for output + * @param[in] blockSize number of samples in each vector + * + * Scaling and Overflow Behavior: + * \par + * The function uses saturating arithmetic. + * Results outside of the allowable Q15 range [0x8000 0x7FFF] will be saturated. + */ + +void arm_nn_mult_q15( + q15_t * pSrcA, + q15_t * pSrcB, + q15_t * pDst, + const uint16_t out_shift, + uint32_t blockSize) +{ + uint32_t blkCnt; /* loop counters */ + +#if defined (ARM_MATH_DSP) + +/* Run the below code for Cortex-M4 and Cortex-M3 */ + q31_t inA1, inA2, inB1, inB2; /* temporary input variables */ + q15_t out1, out2, out3, out4; /* temporary output variables */ + q31_t mul1, mul2, mul3, mul4; /* temporary variables */ + + /* loop Unrolling */ + blkCnt = blockSize >> 2U; + + /* First part of the processing with loop unrolling. 
Compute 4 outputs at a time. + ** a second loop below computes the remaining 1 to 3 samples. */ + while (blkCnt > 0U) + { + /* read two samples at a time from sourceA */ + inA1 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcA); + /* read two samples at a time from sourceB */ + inB1 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcB); + /* read two samples at a time from sourceA */ + inA2 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcA); + /* read two samples at a time from sourceB */ + inB2 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcB); + + /* multiply mul = sourceA * sourceB */ + mul1 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1 >> 16)); + mul2 = (q31_t) ((q15_t) inA1 * (q15_t) inB1); + mul3 = (q31_t) ((q15_t) (inA2 >> 16) * (q15_t) (inB2 >> 16)); + mul4 = (q31_t) ((q15_t) inA2 * (q15_t) inB2); + + /* saturate result to 16 bit */ + out1 = (q15_t) __SSAT((mul1 + NN_ROUND(out_shift)) >> out_shift, 16); + out2 = (q15_t) __SSAT((mul2 + NN_ROUND(out_shift)) >> out_shift, 16); + out3 = (q15_t) __SSAT((mul3 + NN_ROUND(out_shift)) >> out_shift, 16); + out4 = (q15_t) __SSAT((mul4 + NN_ROUND(out_shift)) >> out_shift, 16); + + /* store the result */ +#ifndef ARM_MATH_BIG_ENDIAN + + *__SIMD32(pDst)++ = __PKHBT(out2, out1, 16); + *__SIMD32(pDst)++ = __PKHBT(out4, out3, 16); + +#else + + *__SIMD32(pDst)++ = __PKHBT(out2, out1, 16); + *__SIMD32(pDst)++ = __PKHBT(out4, out3, 16); + +#endif /* #ifndef ARM_MATH_BIG_ENDIAN */ + + /* Decrement the blockSize loop counter */ + blkCnt--; + } + + /* If the blockSize is not a multiple of 4, compute any remaining output samples here. + ** No loop unrolling is used. */ + blkCnt = blockSize % 0x4U; + +#else + + /* Run the below code for Cortex-M0 */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_DSP) */ + + + while (blkCnt > 0U) + { + /* C = A * B */ + /* Multiply the inputs and store the result in the destination buffer */ + *pDst++ = (q15_t) __SSAT((((q31_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 16); + + /* Decrement the blockSize loop counter */ + blkCnt--; + } +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q7.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q7.c new file mode 100755 index 00000000000..192c0880a43 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_mult_q7.c @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mult_q7.c + * Description: Q7 vector multiplication with variable output shifts + * + * $Date: 13. 
July 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/** + * @brief Q7 vector multiplication with variable output shifts + * @param[in] *pSrcA pointer to the first input vector + * @param[in] *pSrcB pointer to the second input vector + * @param[out] *pDst pointer to the output vector + * @param[in] out_shift amount of right-shift for output + * @param[in] blockSize number of samples in each vector + * + * Scaling and Overflow Behavior: + * \par + * The function uses saturating arithmetic. + * Results outside of the allowable Q7 range [0x80 0x7F] will be saturated. + */ + +void arm_nn_mult_q7( + q7_t * pSrcA, + q7_t * pSrcB, + q7_t * pDst, + const uint16_t out_shift, + uint32_t blockSize) +{ + uint32_t blkCnt; /* loop counters */ + +#if defined (ARM_MATH_DSP) + +/* Run the below code for Cortex-M4 and Cortex-M3 */ + q7_t out1, out2, out3, out4; /* Temporary variables to store the product */ + + /* loop Unrolling */ + blkCnt = blockSize >> 2U; + + /* First part of the processing with loop unrolling. Compute 4 outputs at a time. + ** a second loop below computes the remaining 1 to 3 samples. */ + while (blkCnt > 0U) + { + /* C = A * B */ + /* Multiply the inputs and store the results in temporary variables */ + out1 = (q7_t) __SSAT((((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); + out2 = (q7_t) __SSAT((((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); + out3 = (q7_t) __SSAT((((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); + out4 = (q7_t) __SSAT((((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); + + /* Store the results of 4 inputs in the destination buffer in single cycle by packing */ + *__SIMD32(pDst)++ = __PACKq7(out1, out2, out3, out4); + + /* Decrement the blockSize loop counter */ + blkCnt--; + } + + /* If the blockSize is not a multiple of 4, compute any remaining output samples here. + ** No loop unrolling is used. */ + blkCnt = blockSize % 0x4U; + +#else + + /* Run the below code for Cortex-M0 */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_DSP) */ + + + while (blkCnt > 0U) + { + /* C = A * B */ + /* Multiply the inputs and store the result in the destination buffer */ + *pDst++ = (q7_t) __SSAT((((q15_t) (*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); + + /* Decrement the blockSize loop counter */ + blkCnt--; + } +} + +/** + * @} end of NNBasicMath group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c new file mode 100755 index 00000000000..3c299596685 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c @@ -0,0 +1,356 @@ +/* + * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_s8 + * Description: s8 vector by matrix (transposed) multiplication + * + * $Date: March 17, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * s8 vector by matrix (transposed) multiplication + * + * Refer header file for details. + * + */ +arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max) +{ +#if defined(ARM_MATH_DSP) + const int32_t off0 = rhs_cols - 4; + const int16_t lhs_offset_s16 = lhs_offset; + const int16_t rhs_offset_s16 = rhs_offset; + + const uint32_t lhs_offset_s16x2 = __PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + const uint32_t rhs_offset_s16x2 = __PKHBT(rhs_offset_s16, rhs_offset_s16, 16); + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + { + const q7_t *lhs_ptr = &lhs[0]; + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = *bias++; + q31_t res01 = *bias++; + + int32_t rhs_cols_idx = 0; + + q31_t val0, val1, val2, val3, val4, val5; + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + // Read 4 x int8 values from the RHS matrix + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val2 = __SXTAB16(rhs_offset_s16x2, val0); + // Read 4 x int8 values from the LHS vector + val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val1); + // Read 4 x int8 values from the RHS matrix + val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); + val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); + + // Perform the accumulations + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTAB16(rhs_offset_s16x2, val4); + res00 = __SMLAD(val1, val0, res00); + val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); + // Read 4 x int8 values from the RHS matrix + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val1, val4, res01); + + val2 = __SXTAB16(rhs_offset_s16x2, val0); + // Read 4 x int8 values from the LHS vector + val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val1); + // Read 4 x int8 values from the RHS matrix + val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); + val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); + + // Perform the accumulations + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTAB16(rhs_offset_s16x2, val4); + res00 = __SMLAD(val1, val0, 
res00); + val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); + // Read 4 x int8 values from the RHS matrix + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val1, val4, res01); + + val2 = __SXTAB16(rhs_offset_s16x2, val0); + // Read 4 x int8 values from the LHS vector + val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val1); + // Read 4 x int8 values from the RHS matrix + val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); + val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); + + // Perform the accumulations + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTAB16(rhs_offset_s16x2, val4); + res00 = __SMLAD(val1, val0, res00); + val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); + // Read 4 x int8 values from the RHS matrix + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val1, val4, res01); + + val2 = __SXTAB16(rhs_offset_s16x2, val0); + // Read 4 x int8 values from the LHS vector + val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val1); + // Read 4 x int8 values from the RHS matrix + val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); + val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); + + // Perform the accumulations + res00 = __SMLAD(val3, val2, res00); + val5 = __SXTAB16(rhs_offset_s16x2, val4); + res00 = __SMLAD(val1, val0, res00); + val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); + res01 = __SMLAD(val3, val5, res01); + res01 = __SMLAD(val1, val4, res01); + } + + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; + q31_t rhs_value1 = rhs_ptr[rhs_cols] + rhs_offset; + q31_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + *dst++ = (q7_t)res00; + *dst++ = (q7_t)res01; + + rhs += 2 * rhs_cols; + } + + if (rhs_rows % 2) + { + const q7_t *lhs_ptr = &lhs[0]; + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = *bias++; + + int32_t rhs_cols_idx = 0; + + q31_t val0, val1, val2, val3; + for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + { + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = __SXTAB16(rhs_offset_s16x2, val0); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val2); + val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); + + // Partial accumulations + res00 = __SMLAD(val3, val1, res00); + res00 = __SMLAD(val2, val0, res00); + + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = __SXTAB16(rhs_offset_s16x2, val0); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val2); + val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); + + // Partial accumulations + res00 = __SMLAD(val3, val1, res00); + res00 = __SMLAD(val2, val0, 
res00); + + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = __SXTAB16(rhs_offset_s16x2, val0); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val2); + val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); + + // Partial accumulations + res00 = __SMLAD(val3, val1, res00); + res00 = __SMLAD(val2, val0, res00); + + val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); + val1 = __SXTAB16(rhs_offset_s16x2, val0); + val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); + val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); + val3 = __SXTAB16(lhs_offset_s16x2, val2); + val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); + + // Partial accumulations + res00 = __SMLAD(val3, val1, res00); + res00 = __SMLAD(val2, val0, res00); + } + + for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; + q31_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (q7_t)res00; + } + +#else + + for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + { + const q7_t *lhs_ptr = &lhs[0]; + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = *bias++; + q31_t res01 = *bias++; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; + q31_t rhs_value1 = rhs_ptr[rhs_cols] + rhs_offset; + q31_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + res01 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + + *dst++ = (q7_t)res00; + *dst++ = (q7_t)res01; + + rhs += 2 * rhs_cols; + } + + if (rhs_rows % 2) + { + const q7_t *lhs_ptr = &lhs[0]; + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = *bias++; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; + q31_t lhs_value = lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Add offset + res00 += dst_offset; + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (q7_t)res00; + } +#endif + + return ARM_MATH_SUCCESS; +} + +/** + * @} end of NNBasicMath group + */ \ No newline at end of file diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nntables.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nntables.c new file mode 100755 index 00000000000..c28f1a6126b --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_nntables.c @@ -0,0 +1,297 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its 
affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nntables.c + * Description: Converts the elements of the Q7 vector to Q15 vector without left-shift + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnsupportfunctions.h" + +/** + * @brief tables for various activation functions + * + * This file include the declaration of common tables. + * Most of them are used for activation functions + * + * Assumption: + * Unified table: input is 3.x format, i.e, range of [-8, 8) + * sigmoid(8) = 0.9996646498695336 + * tanh(8) = 0.9999997749296758 + * The accuracy here should be good enough + * + * 2-stage HL table: + * + * The entire input range is divided into two parts: + * + * Low range table: 0x000x xxxx or 0x111x xxxx + * table entry will be the binary number excluding the first + * two digits, i.e., 0x0x xxxx or 0x1x xxxx + * + * + * + * High range table 0x0010 0000 -- 0x0111 1111 + * 0x1000 0000 -- 0x1101 1111 + * + * For positive numbers, table entry will be + * 0x0010 0000 -- 0x0111 1111 minus 0x0010 0000 + * i.e., 0x0000 0000 - 0x0101 11111 + * + * same thing for the negative numbers, table entry will be + * 0x1000 0000 -- 0x1101 1111 minux 0x0010 0000 + * i.e., 0x0110 0000 - 0x1011 1111 + */ + +const q7_t sigmoidTable_q7[256] = { + 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, + 0x50, 0x52, 0x53, 0x55, 0x57, 0x59, 0x5a, 0x5c, + 0x5e, 0x5f, 0x61, 0x62, 0x63, 0x65, 0x66, 0x67, + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, + 0x71, 0x72, 0x72, 0x73, 0x74, 0x74, 0x75, 0x76, + 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, + 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, + 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7d, 0x7e, + 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x06, + 0x06, 0x06, 0x07, 0x07, 0x08, 0x08, 0x09, 0x09, + 0x0a, 0x0a, 0x0b, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 
0x17, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x21, + 0x22, 0x24, 0x26, 0x27, 0x29, 0x2b, 0x2d, 0x2e, + 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, +}; + +const q15_t sigmoidTable_q15[256] = { + 0x4000, 0x4200, 0x43ff, 0x45fc, 0x47f5, 0x49eb, 0x4bdc, 0x4dc8, + 0x4fad, 0x518a, 0x5360, 0x552c, 0x56ef, 0x58a8, 0x5a57, 0x5bfb, + 0x5d93, 0x5f20, 0x60a1, 0x6216, 0x637f, 0x64db, 0x662b, 0x676f, + 0x68a6, 0x69d2, 0x6af1, 0x6c05, 0x6d0d, 0x6e09, 0x6efb, 0x6fe2, + 0x70be, 0x7190, 0x7258, 0x7316, 0x73cc, 0x7478, 0x751b, 0x75b7, + 0x764a, 0x76d6, 0x775b, 0x77d8, 0x784f, 0x78c0, 0x792a, 0x798f, + 0x79ee, 0x7a48, 0x7a9d, 0x7aed, 0x7b39, 0x7b80, 0x7bc4, 0x7c03, + 0x7c3f, 0x7c78, 0x7cad, 0x7ce0, 0x7d0f, 0x7d3c, 0x7d66, 0x7d8d, + 0x7db3, 0x7dd6, 0x7df7, 0x7e16, 0x7e33, 0x7e4f, 0x7e69, 0x7e81, + 0x7e98, 0x7eae, 0x7ec2, 0x7ed5, 0x7ee7, 0x7ef8, 0x7f08, 0x7f17, + 0x7f25, 0x7f32, 0x7f3e, 0x7f4a, 0x7f55, 0x7f5f, 0x7f69, 0x7f72, + 0x7f7b, 0x7f83, 0x7f8a, 0x7f91, 0x7f98, 0x7f9e, 0x7fa4, 0x7faa, + 0x7faf, 0x7fb4, 0x7fb8, 0x7fbd, 0x7fc1, 0x7fc5, 0x7fc8, 0x7fcc, + 0x7fcf, 0x7fd2, 0x7fd5, 0x7fd7, 0x7fda, 0x7fdc, 0x7fde, 0x7fe0, + 0x7fe2, 0x7fe4, 0x7fe6, 0x7fe7, 0x7fe9, 0x7fea, 0x7feb, 0x7fed, + 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3, 0x7ff4, 0x7ff4, + 0x000b, 0x000c, 0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, + 0x0012, 0x0013, 0x0015, 0x0016, 0x0017, 0x0019, 0x001a, 0x001c, + 0x001e, 0x0020, 0x0022, 0x0024, 0x0026, 0x0029, 0x002b, 0x002e, + 0x0031, 0x0034, 0x0038, 0x003b, 0x003f, 0x0043, 0x0048, 0x004c, + 0x0051, 0x0056, 0x005c, 0x0062, 0x0068, 0x006f, 0x0076, 0x007d, + 0x0085, 0x008e, 0x0097, 0x00a1, 0x00ab, 0x00b6, 0x00c2, 0x00ce, + 0x00db, 0x00e9, 0x00f8, 0x0108, 0x0119, 0x012b, 0x013e, 0x0152, + 0x0168, 0x017f, 0x0197, 0x01b1, 0x01cd, 0x01ea, 0x0209, 0x022a, + 0x024d, 0x0273, 0x029a, 0x02c4, 0x02f1, 0x0320, 0x0353, 0x0388, + 0x03c1, 0x03fd, 0x043c, 0x0480, 0x04c7, 0x0513, 0x0563, 0x05b8, + 0x0612, 0x0671, 0x06d6, 0x0740, 0x07b1, 0x0828, 0x08a5, 0x092a, + 0x09b6, 0x0a49, 0x0ae5, 0x0b88, 0x0c34, 0x0cea, 0x0da8, 0x0e70, + 0x0f42, 0x101e, 0x1105, 0x11f7, 0x12f3, 0x13fb, 0x150f, 0x162e, + 0x175a, 0x1891, 0x19d5, 0x1b25, 0x1c81, 0x1dea, 0x1f5f, 0x20e0, + 0x226d, 0x2405, 0x25a9, 0x2758, 0x2911, 0x2ad4, 0x2ca0, 0x2e76, + 0x3053, 0x3238, 0x3424, 0x3615, 0x380b, 0x3a04, 0x3c01, 0x3e00, +}; + +const q15_t sigmoidLTable_q15[128] = { + 0x4000, 0x4100, 0x4200, 0x42ff, 0x43ff, 0x44fd, 0x45fc, 0x46f9, + 0x47f5, 0x48f1, 0x49eb, 0x4ae5, 0x4bdc, 0x4cd3, 0x4dc8, 0x4ebb, + 0x4fad, 0x509c, 0x518a, 0x5276, 0x5360, 0x5447, 0x552c, 0x560f, + 0x56ef, 0x57cd, 0x58a8, 0x5981, 0x5a57, 0x5b2a, 0x5bfb, 0x5cc9, + 0x5d93, 0x5e5b, 0x5f20, 0x5fe2, 0x60a1, 0x615d, 0x6216, 0x62cc, + 0x637f, 0x642e, 0x64db, 0x6584, 0x662b, 0x66ce, 0x676f, 0x680c, + 0x68a6, 0x693d, 0x69d2, 0x6a63, 0x6af1, 0x6b7c, 0x6c05, 0x6c8a, + 0x6d0d, 0x6d8d, 0x6e09, 0x6e84, 0x6efb, 0x6f70, 0x6fe2, 0x7051, + 0x0f42, 0x0faf, 0x101e, 0x1090, 0x1105, 0x117c, 0x11f7, 0x1273, + 0x12f3, 0x1376, 0x13fb, 0x1484, 0x150f, 0x159d, 0x162e, 0x16c3, + 0x175a, 0x17f4, 0x1891, 0x1932, 0x19d5, 0x1a7c, 0x1b25, 0x1bd2, + 0x1c81, 0x1d34, 0x1dea, 0x1ea3, 0x1f5f, 0x201e, 0x20e0, 0x21a5, + 0x226d, 0x2337, 0x2405, 0x24d6, 0x25a9, 0x267f, 0x2758, 0x2833, + 0x2911, 0x29f1, 0x2ad4, 0x2bb9, 0x2ca0, 0x2d8a, 0x2e76, 0x2f64, + 0x3053, 0x3145, 0x3238, 0x332d, 0x3424, 0x351b, 0x3615, 0x370f, + 0x380b, 0x3907, 0x3a04, 0x3b03, 0x3c01, 0x3d01, 0x3e00, 0x3f00, +}; + +const q15_t sigmoidHTable_q15[192] = { + 0x70be, 0x7190, 0x7258, 0x7316, 0x73cc, 0x7478, 0x751b, 0x75b7, + 0x764a, 0x76d6, 0x775b, 0x77d8, 
0x784f, 0x78c0, 0x792a, 0x798f, + 0x79ee, 0x7a48, 0x7a9d, 0x7aed, 0x7b39, 0x7b80, 0x7bc4, 0x7c03, + 0x7c3f, 0x7c78, 0x7cad, 0x7ce0, 0x7d0f, 0x7d3c, 0x7d66, 0x7d8d, + 0x7db3, 0x7dd6, 0x7df7, 0x7e16, 0x7e33, 0x7e4f, 0x7e69, 0x7e81, + 0x7e98, 0x7eae, 0x7ec2, 0x7ed5, 0x7ee7, 0x7ef8, 0x7f08, 0x7f17, + 0x7f25, 0x7f32, 0x7f3e, 0x7f4a, 0x7f55, 0x7f5f, 0x7f69, 0x7f72, + 0x7f7b, 0x7f83, 0x7f8a, 0x7f91, 0x7f98, 0x7f9e, 0x7fa4, 0x7faa, + 0x7faf, 0x7fb4, 0x7fb8, 0x7fbd, 0x7fc1, 0x7fc5, 0x7fc8, 0x7fcc, + 0x7fcf, 0x7fd2, 0x7fd5, 0x7fd7, 0x7fda, 0x7fdc, 0x7fde, 0x7fe0, + 0x7fe2, 0x7fe4, 0x7fe6, 0x7fe7, 0x7fe9, 0x7fea, 0x7feb, 0x7fed, + 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3, 0x7ff4, 0x7ff4, + 0x000b, 0x000c, 0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, + 0x0012, 0x0013, 0x0015, 0x0016, 0x0017, 0x0019, 0x001a, 0x001c, + 0x001e, 0x0020, 0x0022, 0x0024, 0x0026, 0x0029, 0x002b, 0x002e, + 0x0031, 0x0034, 0x0038, 0x003b, 0x003f, 0x0043, 0x0048, 0x004c, + 0x0051, 0x0056, 0x005c, 0x0062, 0x0068, 0x006f, 0x0076, 0x007d, + 0x0085, 0x008e, 0x0097, 0x00a1, 0x00ab, 0x00b6, 0x00c2, 0x00ce, + 0x00db, 0x00e9, 0x00f8, 0x0108, 0x0119, 0x012b, 0x013e, 0x0152, + 0x0168, 0x017f, 0x0197, 0x01b1, 0x01cd, 0x01ea, 0x0209, 0x022a, + 0x024d, 0x0273, 0x029a, 0x02c4, 0x02f1, 0x0320, 0x0353, 0x0388, + 0x03c1, 0x03fd, 0x043c, 0x0480, 0x04c7, 0x0513, 0x0563, 0x05b8, + 0x0612, 0x0671, 0x06d6, 0x0740, 0x07b1, 0x0828, 0x08a5, 0x092a, + 0x09b6, 0x0a49, 0x0ae5, 0x0b88, 0x0c34, 0x0cea, 0x0da8, 0x0e70, +}; + +const q7_t tanhTable_q7[256] = { + 0x00, 0x08, 0x10, 0x18, 0x1f, 0x27, 0x2e, 0x35, + 0x3b, 0x41, 0x47, 0x4c, 0x51, 0x56, 0x5a, 0x5e, + 0x61, 0x65, 0x68, 0x6a, 0x6d, 0x6f, 0x71, 0x72, + 0x74, 0x75, 0x76, 0x78, 0x78, 0x79, 0x7a, 0x7b, + 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, + 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, + 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x82, + 0x82, 0x82, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, + 0x85, 0x85, 0x86, 0x87, 0x88, 0x88, 0x8a, 0x8b, + 0x8c, 0x8e, 0x8f, 0x91, 0x93, 0x96, 0x98, 0x9b, + 0x9f, 0xa2, 0xa6, 0xaa, 0xaf, 0xb4, 0xb9, 0xbf, + 0xc5, 0xcb, 0xd2, 0xd9, 0xe1, 0xe8, 0xf0, 0xf8, +}; + +const q15_t tanhTable_q15[256] = { + 0x0000, 0x07fd, 0x0feb, 0x17b9, 0x1f59, 0x26bf, 0x2ddf, 0x34ae, + 0x3b27, 0x4142, 0x46fd, 0x4c56, 0x514d, 0x55e2, 0x5a1a, 0x5df6, + 0x617c, 0x64b0, 0x6797, 0x6a37, 0x6c95, 0x6eb5, 0x709e, 0x7254, + 0x73dc, 0x753a, 0x7672, 0x7788, 0x787f, 0x795b, 0x7a1e, 0x7acb, + 0x7b65, 0x7bee, 0x7c66, 0x7cd1, 0x7d30, 0x7d84, 0x7dce, 0x7e0f, + 0x7e49, 0x7e7d, 0x7eaa, 0x7ed2, 0x7ef5, 0x7f14, 0x7f30, 
0x7f48, + 0x7f5e, 0x7f71, 0x7f82, 0x7f91, 0x7f9e, 0x7fa9, 0x7fb3, 0x7fbc, + 0x7fc4, 0x7fcb, 0x7fd1, 0x7fd7, 0x7fdc, 0x7fe0, 0x7fe4, 0x7fe7, + 0x7fea, 0x7fed, 0x7fef, 0x7ff1, 0x7ff3, 0x7ff4, 0x7ff6, 0x7ff7, + 0x7ff8, 0x7ff9, 0x7ffa, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffc, 0x7ffd, + 0x7ffd, 0x7ffd, 0x7ffe, 0x7ffe, 0x7ffe, 0x7ffe, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, + 0x8001, 0x8001, 0x8001, 0x8002, 0x8002, 0x8002, 0x8002, 0x8003, + 0x8003, 0x8003, 0x8004, 0x8004, 0x8005, 0x8006, 0x8006, 0x8007, + 0x8008, 0x8009, 0x800a, 0x800c, 0x800d, 0x800f, 0x8011, 0x8013, + 0x8016, 0x8019, 0x801c, 0x8020, 0x8024, 0x8029, 0x802f, 0x8035, + 0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, + 0x80a2, 0x80b8, 0x80d0, 0x80ec, 0x810b, 0x812e, 0x8156, 0x8183, + 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412, + 0x849b, 0x8535, 0x85e2, 0x86a5, 0x8781, 0x8878, 0x898e, 0x8ac6, + 0x8c24, 0x8dac, 0x8f62, 0x914b, 0x936b, 0x95c9, 0x9869, 0x9b50, + 0x9e84, 0xa20a, 0xa5e6, 0xaa1e, 0xaeb3, 0xb3aa, 0xb903, 0xbebe, + 0xc4d9, 0xcb52, 0xd221, 0xd941, 0xe0a7, 0xe847, 0xf015, 0xf803, +}; + +const q15_t tanhLTable_q15[128] = { + 0x0000, 0x0400, 0x07fd, 0x0bf7, 0x0feb, 0x13d7, 0x17b9, 0x1b90, + 0x1f59, 0x2314, 0x26bf, 0x2a58, 0x2ddf, 0x3151, 0x34ae, 0x37f6, + 0x3b27, 0x3e40, 0x4142, 0x442c, 0x46fd, 0x49b6, 0x4c56, 0x4edd, + 0x514d, 0x53a3, 0x55e2, 0x580a, 0x5a1a, 0x5c13, 0x5df6, 0x5fc4, + 0x617c, 0x6320, 0x64b0, 0x662d, 0x6797, 0x68f0, 0x6a37, 0x6b6e, + 0x6c95, 0x6dac, 0x6eb5, 0x6fb0, 0x709e, 0x717f, 0x7254, 0x731e, + 0x73dc, 0x7490, 0x753a, 0x75da, 0x7672, 0x7701, 0x7788, 0x7807, + 0x787f, 0x78f0, 0x795b, 0x79bf, 0x7a1e, 0x7a77, 0x7acb, 0x7b1b, + 0x849b, 0x84e5, 0x8535, 0x8589, 0x85e2, 0x8641, 0x86a5, 0x8710, + 0x8781, 0x87f9, 0x8878, 0x88ff, 0x898e, 0x8a26, 0x8ac6, 0x8b70, + 0x8c24, 0x8ce2, 0x8dac, 0x8e81, 0x8f62, 0x9050, 0x914b, 0x9254, + 0x936b, 0x9492, 0x95c9, 0x9710, 0x9869, 0x99d3, 0x9b50, 0x9ce0, + 0x9e84, 0xa03c, 0xa20a, 0xa3ed, 0xa5e6, 0xa7f6, 0xaa1e, 0xac5d, + 0xaeb3, 0xb123, 0xb3aa, 0xb64a, 0xb903, 0xbbd4, 0xbebe, 0xc1c0, + 0xc4d9, 0xc80a, 0xcb52, 0xceaf, 0xd221, 0xd5a8, 0xd941, 0xdcec, + 0xe0a7, 0xe470, 0xe847, 0xec29, 0xf015, 0xf409, 0xf803, 0xfc00, +}; + +const q15_t tanhHTable_q15[192] = { + 0x7b65, 0x7bee, 0x7c66, 0x7cd1, 0x7d30, 0x7d84, 0x7dce, 0x7e0f, + 0x7e49, 0x7e7d, 0x7eaa, 0x7ed2, 0x7ef5, 0x7f14, 0x7f30, 0x7f48, + 0x7f5e, 0x7f71, 0x7f82, 0x7f91, 0x7f9e, 0x7fa9, 0x7fb3, 0x7fbc, + 0x7fc4, 0x7fcb, 0x7fd1, 0x7fd7, 0x7fdc, 0x7fe0, 0x7fe4, 0x7fe7, + 0x7fea, 0x7fed, 0x7fef, 0x7ff1, 0x7ff3, 0x7ff4, 0x7ff6, 0x7ff7, + 0x7ff8, 0x7ff9, 0x7ffa, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffc, 0x7ffd, + 0x7ffd, 0x7ffd, 0x7ffe, 0x7ffe, 0x7ffe, 0x7ffe, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 
0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, + 0x8000, 0x8000, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, + 0x8001, 0x8001, 0x8001, 0x8002, 0x8002, 0x8002, 0x8002, 0x8003, + 0x8003, 0x8003, 0x8004, 0x8004, 0x8005, 0x8006, 0x8006, 0x8007, + 0x8008, 0x8009, 0x800a, 0x800c, 0x800d, 0x800f, 0x8011, 0x8013, + 0x8016, 0x8019, 0x801c, 0x8020, 0x8024, 0x8029, 0x802f, 0x8035, + 0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, + 0x80a2, 0x80b8, 0x80d0, 0x80ec, 0x810b, 0x812e, 0x8156, 0x8183, + 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412, +}; diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_no_shift.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_no_shift.c new file mode 100755 index 00000000000..c6d3f3ce3fc --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_no_shift.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_q7_to_q15_no_shift.c + * Description: Converts the elements of the Q7 vector to Q15 vector without left-shift + * + * $Date: February 27, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup nndata_convert + * @{ + */ + +/** + * @brief Converts the elements of the Q7 vector to Q15 vector without left-shift + * @param[in] *pSrc points to the Q7 input vector + * @param[out] *pDst points to the Q15 output vector + * @param[in] blockSize length of the input vector + * + * \par Description: + * + * The equation used for the conversion process is: + * + *
+ * <pre>
+ * 	pDst[n] = (q15_t) pSrc[n];   0 <= n < blockSize.
+ * </pre>
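+ * \par
+ * [Editor's note] A minimal usage sketch, not part of the upstream CMSIS-NN
+ * source; the buffer names and the block size of 4 are illustrative only.
+ * Because the conversion is a plain widening cast, values and signs are
+ * preserved unchanged:
+ * <pre>
+ *     q7_t  in[4]  = {-128, -1, 0, 127};
+ *     q15_t out[4];
+ *     arm_q7_to_q15_no_shift(in, out, 4);   // out = {-128, -1, 0, 127}
+ * </pre>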
+ * + */ + +void arm_q7_to_q15_no_shift(const q7_t * pSrc, q15_t * pDst, uint32_t blockSize) +{ + const q7_t *pIn = pSrc; + uint32_t blkCnt; + +#if defined(ARM_MATH_DSP) + q31_t in; + q31_t in1, in2; + q31_t out1, out2; + + /*loop Unrolling */ + blkCnt = blockSize >> 2u; + + /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */ + while (blkCnt > 0u) + { + in = arm_nn_read_q7x4_ia(&pIn); + + /* rotatate in by 8 and extend two q7_t values to q15_t values */ + in1 = __SXTB16(__ROR(in, 8)); + + /* extend remaining two q7_t values to q15_t values */ + in2 = __SXTB16(in); + +#ifndef ARM_MATH_BIG_ENDIAN + out2 = __PKHTB(in1, in2, 16); + out1 = __PKHBT(in2, in1, 16); +#else + out1 = __PKHTB(in1, in2, 16); + out2 = __PKHBT(in2, in1, 16); +#endif + write_q15x2_ia(&pDst, out1); + write_q15x2_ia(&pDst, out2); + + /* Decrement the loop counter */ + blkCnt--; + } + + /* If the blockSize is not a multiple of 4, compute any remaining output samples here. + ** No loop unrolling is used. */ + blkCnt = blockSize % 0x4u; + +#else + + /* Run the below code for Cortex-M0 */ + + /* Loop over blockSize number of values */ + blkCnt = blockSize; + +#endif /* #ifndef ARM_MATH_CM0_FAMILY */ + + while (blkCnt > 0u) + { + /* convert from q7 to q15 and then store the results in the destination buffer */ + *pDst++ = (q15_t)*pIn++; + + /* Decrement the loop counter */ + blkCnt--; + } + +} + +/** + * @} end of nndata_convert group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c new file mode 100755 index 00000000000..7a8eb59ea47 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_q7_to_q15_reordered_with_offset.c + * Description: Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. The re-ordering + * is a signature of sign extension intrinsic(DSP extension). + * + * $Date: March 3, 2020 + * $Revision: V.2.0.2 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup nndata_convert + * @{ + */ + +/** + * @brief Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. + * + * @note Refer header file for details. 
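+ * \par
+ * [Editor's note] Illustrative description of the re-ordering, not part of
+ * the upstream documentation; it is derived from the little-endian DSP
+ * (__SXTAB16/__ROR) path below, so the stated byte order is an assumption
+ * for other build configurations. For each group of four inputs
+ * {a, b, c, d} the output is written as
+ * <pre>
+ *     {a + offset, c + offset, b + offset, d + offset}
+ * </pre>
+ * i.e. even-indexed samples first, then odd-indexed samples, matching the
+ * operand layout used by the SIMD matrix-multiply kernels in this library.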
+ * + */ + +void arm_q7_to_q15_reordered_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset) +{ + +#if defined(ARM_MATH_DSP) + uint32_t block_cnt; + /* Run the below code for cores that support SIMD instructions */ + q31_t in_q7x4; + q31_t out_q15x2_1; + q31_t out_q15x2_2; + + /*loop unrolling */ + block_cnt = block_size >> 2u; + + /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */ + const q31_t offset_q15x2 = __PKHBT(offset, offset, 16); + while (block_cnt > 0u) + { + /* convert from q7 to q15 and then store the results in the destination buffer */ + in_q7x4 = arm_nn_read_q7x4_ia(&src); + + /* Extract and sign extend each of the four q7 values to q15 */ + out_q15x2_1 = __SXTAB16(offset_q15x2, __ROR(in_q7x4, 8)); + out_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4); + + write_q15x2_ia(&dst, out_q15x2_2); + write_q15x2_ia(&dst, out_q15x2_1); + + block_cnt--; + } + /* Handle left over samples */ + block_cnt = block_size % 0x4u; + + while (block_cnt > 0u) + { + *dst++ = (q15_t)*src++ + offset; + + /* Decrement the loop counter */ + block_cnt--; + } +#else + (void)src; + (void)dst; + (void)block_size; + (void)offset; + /* Not available */ +#endif +} + +/** + * @} end of nndata_convert group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_with_offset.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_with_offset.c new file mode 100755 index 00000000000..69c70e38113 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/NNSupportFunctions/arm_q7_to_q15_with_offset.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in_q7x4 compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in_q7x4 writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
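+ *
+ * [Editor's note] Usage sketch for the conversion function defined below,
+ * not part of the upstream source; the variable names are illustrative. A
+ * typical caller folds the activation offset (zero-point correction) into
+ * the widening step, e.g.
+ *
+ *     arm_q7_to_q15_with_offset(input_s8, buffer_s16, num, (q15_t)input_offset);
+ *
+ * so that the q15 MAC kernels that consume the buffer need no per-sample
+ * offset handling of their own.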
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_q7_to_q15_with_offset.c + * Description: Converts the elements of the Q7 vector to Q15 vector with an added offset + * + * $Date: March 3, 2020 + * $Revision: V.2.0.2 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup nndata_convert + * @{ + */ + +void arm_q7_to_q15_with_offset(const q7_t *src, + q15_t *dst, + uint32_t block_size, + q15_t offset) +{ + int block_cnt; + +#if defined(ARM_MATH_MVEI) + + int16x8_t source; + const int16x8_t source_offset = vdupq_n_s16(offset); + block_cnt = block_size / 8; + + while (block_cnt > 0) + { + source = vldrbq_s16(src); + source = vaddq_s16(source, source_offset); + vstrhq_s16(dst, source); + dst += 8; + src += 8; + block_cnt--; + } + + block_cnt = block_size & 0x7; + +#elif defined(ARM_MATH_DSP) + /* Run the below code for cores that support SIMD instructions */ + q31_t in_q7x4; + q31_t in_q15x2_1; + q31_t in_q15x2_2; + q31_t out_q15x2_1; + q31_t out_q15x2_2; + + /*loop unrolling */ + block_cnt = block_size >> 2; + + /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */ + const q31_t offset_q15x2 = __PKHBT(offset, offset, 16); + while (block_cnt > 0) + { + /* convert from q7 to q15 and then store the results in the destination buffer */ + in_q7x4 = arm_nn_read_q7x4_ia(&src); + + /* Extract and sign extend each of the four q7 values to q15 */ + in_q15x2_1 = __SXTAB16(offset_q15x2, __ROR(in_q7x4, 8)); + in_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4); + + out_q15x2_2 = __PKHTB(in_q15x2_1, in_q15x2_2, 16); + out_q15x2_1 = __PKHBT(in_q15x2_2, in_q15x2_1, 16); + + write_q15x2_ia(&dst, out_q15x2_1); + write_q15x2_ia(&dst, out_q15x2_2); + + block_cnt--; + } + /* Handle left over samples */ + block_cnt = block_size % 0x4; + +#else + /* Run the below code for Cortex-M0 */ + /* Loop over block_size number of values */ + block_cnt = block_size; +#endif + + while (block_cnt > 0) + { + *dst++ = (q15_t)*src++ + offset; + + /* Decrement the loop counter */ + block_cnt--; + } +} + +/** + * @} end of nndata_convert group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/CMakeLists.txt new file mode 100755 index 00000000000..36e2ff7c429 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/CMakeLists.txt @@ -0,0 +1,19 @@ +cmake_minimum_required (VERSION 3.6) + +project(CMSISNNPooling) + +include(configLib) + +file(GLOB SRC "./*_*.c") + +add_library(CMSISNNPooling STATIC ${SRC}) + +configLib(CMSISNNPooling ${ROOT}) +configDsp(CMSISNNPooling ${ROOT}) + +### Includes +target_include_directories(CMSISNNPooling PUBLIC "${NN}/Include") +target_include_directories(CMSISNNPooling PUBLIC "${ROOT}/CMSIS/DSP/Include") + + + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_avgpool_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_avgpool_s8.c new file mode 100755 index 00000000000..949467a427a --- /dev/null +++ 
b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_avgpool_s8.c @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_avgpool_s8.c + * Description: Pooling function implementations + * + * $Date: March 4,2020 + * $Revision: V.1.1.1 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup Pooling + * @{ + */ + + /* + * s8 average pooling function + * + * Refer to header file for details. + * + */ + +#if defined(ARM_MATH_MVEI) + +arm_status arm_avgpool_s8(const int dim_src_height, + const int dim_src_width, + const int dim_dst_height, + const int dim_dst_width, + const int stride_height, + const int stride_width, + const int dim_kernel_height, + const int dim_kernel_width, + const int padding_height, + const int padding_width, + const int act_min, + const int act_max, + const int ch_src, + int8_t *src, + int16_t *bufferA, + int8_t *dst) +{ + (void)bufferA; + int32_t i_x, i_y; + int32_t k_x, k_y; + + for (i_y = 0; i_y < dim_dst_height; i_y++) + { + for (i_x = 0; i_x < dim_dst_width; i_x++) + { + + int32_t k_y_start,k_y_end; + int32_t k_x_start,k_x_end; + int32_t chCnt; + int8_t *pTmp,*pTmpInner; + int8_t *pDst; + + k_y_start = MAX(0, i_y * stride_height - padding_height); + k_y_end = MIN(i_y * stride_height - padding_height + dim_kernel_height,dim_src_height); + + k_x_start = MAX(0,i_x * stride_width - padding_width); + k_x_end = MIN(i_x * stride_width - padding_width + dim_kernel_width, dim_src_width); + + + pTmp = src; + pDst = &dst[ch_src * (i_x + i_y * dim_dst_width)]; + + chCnt = ch_src >> 4; + while(chCnt > 0) + { + int32x4_t sumV1,sumV2,sumV3,sumV4; + + int8x16_t tempV; + int16x8_t tempVLO, tempVHI; + int32x4_t tempVLOLO, tempVLOHI, tempVHILO, tempVHIHI; + int32_t count = 0; + + sumV1 = vdupq_n_s32(0); + sumV2 = vdupq_n_s32(0); + sumV3 = vdupq_n_s32(0); + sumV4 = vdupq_n_s32(0); + + for (k_y = k_y_start; k_y < k_y_end; k_y++) + { + for (k_x = k_x_start; k_x < k_x_end; k_x++) + { + pTmpInner = pTmp + (ch_src * (k_x + k_y * dim_src_width)); + tempV = vldrbq_s8 (pTmpInner); + + tempVLO = vmovlbq_s8(tempV); + tempVHI = vmovltq_s8(tempV); + + tempVLOLO = vmovlbq_s16(tempVLO); + tempVLOHI = vmovltq_s16(tempVLO); + + tempVHILO = vmovlbq_s16(tempVHI); + tempVHIHI = vmovltq_s16(tempVHI); + + sumV1 = vaddq_s32(sumV1,tempVLOLO); + sumV2 = vaddq_s32(sumV2,tempVLOHI); + sumV3 = vaddq_s32(sumV3,tempVHILO); + sumV4 = vaddq_s32(sumV4,tempVHIHI); + + count++; + } + } + + + sumV1[0] = sumV1[0] > 0 ? (sumV1[0] + count / 2) / count : (sumV1[0] - count / 2) / count; + sumV1[1] = sumV1[1] > 0 ? 
(sumV1[1] + count / 2) / count : (sumV1[1] - count / 2) / count; + sumV1[2] = sumV1[2] > 0 ? (sumV1[2] + count / 2) / count : (sumV1[2] - count / 2) / count; + sumV1[3] = sumV1[3] > 0 ? (sumV1[3] + count / 2) / count : (sumV1[3] - count / 2) / count; + + sumV2[0] = sumV2[0] > 0 ? (sumV2[0] + count / 2) / count : (sumV2[0] - count / 2) / count; + sumV2[1] = sumV2[1] > 0 ? (sumV2[1] + count / 2) / count : (sumV2[1] - count / 2) / count; + sumV2[2] = sumV2[2] > 0 ? (sumV2[2] + count / 2) / count : (sumV2[2] - count / 2) / count; + sumV2[3] = sumV2[3] > 0 ? (sumV2[3] + count / 2) / count : (sumV2[3] - count / 2) / count; + + sumV3[0] = sumV3[0] > 0 ? (sumV3[0] + count / 2) / count : (sumV3[0] - count / 2) / count; + sumV3[1] = sumV3[1] > 0 ? (sumV3[1] + count / 2) / count : (sumV3[1] - count / 2) / count; + sumV3[2] = sumV3[2] > 0 ? (sumV3[2] + count / 2) / count : (sumV3[2] - count / 2) / count; + sumV3[3] = sumV3[3] > 0 ? (sumV3[3] + count / 2) / count : (sumV3[3] - count / 2) / count; + + sumV4[0] = sumV4[0] > 0 ? (sumV4[0] + count / 2) / count : (sumV4[0] - count / 2) / count; + sumV4[1] = sumV4[1] > 0 ? (sumV4[1] + count / 2) / count : (sumV4[1] - count / 2) / count; + sumV4[2] = sumV4[2] > 0 ? (sumV4[2] + count / 2) / count : (sumV4[2] - count / 2) / count; + sumV4[3] = sumV4[3] > 0 ? (sumV4[3] + count / 2) / count : (sumV4[3] - count / 2) / count; + + sumV1 = vmaxq_s32(sumV1, vdupq_n_s32(act_min)); + sumV1 = vminq_s32(sumV1, vdupq_n_s32(act_max)); + + sumV2 = vmaxq_s32(sumV2, vdupq_n_s32(act_min)); + sumV2 = vminq_s32(sumV2, vdupq_n_s32(act_max)); + + sumV3 = vmaxq_s32(sumV3, vdupq_n_s32(act_min)); + sumV3 = vminq_s32(sumV3, vdupq_n_s32(act_max)); + + sumV4 = vmaxq_s32(sumV4, vdupq_n_s32(act_min)); + sumV4 = vminq_s32(sumV4, vdupq_n_s32(act_max)); + + tempVLO = vmovnbq_s32(tempVLO,sumV1); + tempVLO = vmovntq_s32(tempVLO,sumV2); + + tempVHI = vmovnbq_s32(tempVHI,sumV3); + tempVHI = vmovntq_s32(tempVHI,sumV4); + + + tempV = vmovnbq_s16(tempV,tempVLO); + tempV = vmovntq_s16(tempV,tempVHI); + + vstrbq_s8(pDst,tempV); + pDst += 16; + + chCnt --; + pTmp += 16; + } + + chCnt = ch_src & 0xF; + while(chCnt > 0) + { + int32_t sum = 0; + int32_t count = 0; + + for (k_y = k_y_start; k_y < k_y_end; k_y++) + { + for (k_x = k_x_start; k_x < k_x_end; k_x++) + { + sum += pTmp[ch_src * (k_x + k_y * dim_src_width)]; + count++; + } + } + sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + *pDst++ = sum; + + chCnt --; + pTmp++; + } + } + } + return ARM_MATH_SUCCESS; +} + +#else +arm_status arm_avgpool_s8(const int dim_src_height, + const int dim_src_width, + const int dim_dst_height, + const int dim_dst_width, + const int stride_height, + const int stride_width, + const int dim_kernel_height, + const int dim_kernel_width, + const int padding_height, + const int padding_width, + const int act_min, + const int act_max, + const int ch_src, + int8_t *src, + int16_t *bufferA, + int8_t *dst) +{ + +/* Reference C code adapted from CMSIS-NN arm_avepool_q7_HWC. 
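+ *
+ * [Editor's note — illustrative, not from the upstream source] The averaging
+ * expression used throughout this file,
+ *     sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count,
+ * rounds to the nearest integer, half away from zero, because C integer
+ * division truncates toward zero. Worked example: sum = 7, count = 4 gives
+ * (7 + 2) / 4 = 2, and sum = -7 gives (-7 - 2) / 4 = -2, whereas plain
+ * 7 / 4 would give 1. Note that count only includes in-bounds taps, so
+ * border outputs are averaged over the pixels that actually fall inside
+ * the input.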
+ */ + (void)bufferA; + int16_t i_ch_in, i_x, i_y; + int16_t k_x, k_y; + + for (i_y = 0; i_y < dim_dst_height; i_y++) + { + for (i_x = 0; i_x < dim_dst_width; i_x++) + { + for (i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) + { + int sum = 0; + int count = 0; + for (k_y = i_y * stride_height - padding_height; k_y < i_y * stride_height - padding_height + dim_kernel_height; k_y++) + { + for (k_x = i_x * stride_width - padding_width; k_x < i_x * stride_width - padding_width + dim_kernel_width; k_x++) + { + if (k_y >= 0 && k_x >= 0 && k_y < dim_src_height && k_x < dim_src_width) + { + sum += src[i_ch_in + ch_src * (k_x + k_y * dim_src_width)]; + count++; + } + } + } + sum = sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + dst[i_ch_in + ch_src * (i_x + i_y * dim_dst_width)] = sum; + } + } + } + + return ARM_MATH_SUCCESS; +} + +#endif /* ARM_MATH_MVEI */ + +int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, + const int ch_src) +{ + (void)dim_dst_width; + (void)ch_src; + return 0; +} +/** + * @} end of Pooling group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8.c new file mode 100755 index 00000000000..c4224f09495 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8.c @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
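+ *
+ * [Editor's note — illustrative worked example, not part of the upstream
+ * source] The max-pooling functions clamp the kernel window at the tensor
+ * borders instead of reading padded values. For instance, in
+ * arm_max_pool_s8() below, with pad_x = 1, stride_x = 2, kernel_x = 3 and
+ * input_x = 8, output column 0 has base_idx_x = -1, so
+ * ker_x_start = MAX(0, 1) = 1 and ker_x_end = MIN(3, 8 - (-1)) = 3; only
+ * input columns 0 and 1 contribute to that output.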
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_max_pool_s8.c + * Description: Pure C max pool implementation + * + * $Date: August 2019 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" +#include + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup Pooling + * @{ + */ + +arm_status arm_max_pool_s8(const uint16_t input_y, + const uint16_t input_x, + const uint16_t output_y, + const uint16_t output_x, + const uint16_t stride_y, + const uint16_t stride_x, + const uint16_t kernel_y, + const uint16_t kernel_x, + const uint16_t pad_y, + const uint16_t pad_x, + const int8_t act_min, + const int8_t act_max, + const uint16_t channel_in, + int8_t *input, + int16_t *tmp_buffer, + int8_t *output) +{ + int32_t i_ch_in, i_out_x, i_out_y; + int32_t i_ker_x, i_ker_y; + (void)tmp_buffer; + + for (i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (i_out_x = 0; i_out_x < output_x; i_out_x++) + { + for (i_ch_in = 0; i_ch_in < channel_in; i_ch_in++) + { + /* Native data type for inner loop variables */ + int32_t max_val = (int8_t)Q7_MIN; + /* Condition for kernel start dimension: (base_idx_ + ker__start) >= 0 */ + const int32_t base_idx_y = (i_out_y * stride_y) - pad_y; + const int32_t base_idx_x = (i_out_x * stride_x) - pad_x; + const int32_t ker_y_start = MAX(0, -base_idx_y); + const int32_t ker_x_start = MAX(0, -base_idx_x); + + /* Condition for kernel end dimension: (base_idx_ + ker__end) < input_ */ + const int32_t ker_y_end = MIN(kernel_y, input_y - base_idx_y); + const int32_t ker_x_end = MIN(kernel_x, input_x - base_idx_x); + + for (i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + for (i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t col_idx = base_idx_x + i_ker_x; + const int32_t row_idx = base_idx_y + i_ker_y; + + max_val = MAX(input[(row_idx * input_x + col_idx) * channel_in + i_ch_in], max_val); + } + } + + /* Activation function */ + max_val = MAX(max_val, act_min); + max_val = MIN(max_val, act_max); + + output[i_ch_in + channel_in * (i_out_x + i_out_y * output_x)] = (int8_t)max_val; + } + } + } + return ARM_MATH_SUCCESS; +} + +/** + * @} end of Pooling group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8_opt.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8_opt.c new file mode 100755 index 00000000000..243e1e5ac6c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_max_pool_s8_opt.c @@ -0,0 +1,335 @@ +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0dim_dst_width + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_max_pool_s8_opt.c + * Description: Pooling function implementations + * + * $Date: February 27, 2020 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +#if defined(ARM_MATH_DSP) + +static void compare_and_replace_if_larger_q7(q7_t *base, + const q7_t *target, + const uint16_t length) +{ +#if defined(ARM_MATH_MVEI) + + int loop_count = length / 16; + while (loop_count) + { + const int8x16_t op_1 = vldrbq_s8(base); + const int8x16_t op_2 = vldrbq_s8(target); + const int8x16_t max = vmaxq_s8(op_1, op_2); + vstrbq_s8(base, max); + base += 16; + target += 16; + loop_count--; + } + + if (((length & 0xF) / 8) > 0) + { + const int16x8_t op_1 = vldrbq_s16(base); + const int16x8_t op_2 = vldrbq_s16(target); + const int16x8_t max = vmaxq_s16(op_1, op_2); + vstrbq_s16(base, max); + + base += 8; + target += 8; + } + + for (int i = 0; i < (length & 7); i++) + { + if (target[i] > base[i]) + { + base[i] = target[i]; + } + } + +#else + q7_t *dst = base; + const q7_t *src = target; + union arm_nnword ref_max; + union arm_nnword comp_max; + int32_t cnt = length >> 2; + + while (cnt > 0l) + { + ref_max.word = arm_nn_read_q7x4(dst); + comp_max.word = arm_nn_read_q7x4_ia(&src); + + if (comp_max.bytes[0] > ref_max.bytes[0]) + { + ref_max.bytes[0] = comp_max.bytes[0]; + } + if (comp_max.bytes[1] > ref_max.bytes[1]) + { + ref_max.bytes[1] = comp_max.bytes[1]; + } + if (comp_max.bytes[2] > ref_max.bytes[2]) + { + ref_max.bytes[2] = comp_max.bytes[2]; + } + if (comp_max.bytes[3] > ref_max.bytes[3]) + { + ref_max.bytes[3] = comp_max.bytes[3]; + } + + write_q7x4_ia(&dst, ref_max.word); + + cnt--; + } + + cnt = length & 0x3; + while (cnt > 0l) + { + if (*src > *dst) + { + *dst = *src; + } + dst++; + src++; + cnt--; + } +#endif +} + +static void clamp_output(q7_t *source, const uint16_t length, const int32_t act_min, const int32_t act_max) +{ +#if defined(ARM_MATH_MVEI) + + int cnt = length / 16; + while (cnt > 0) + { + const int8x16_t src = vldrbq_s8(source); + int8x16_t res = vmaxq_s8(src, vdupq_n_s8((int8_t)act_min)); + res = vminq_s8(src, vdupq_n_s8((int8_t)act_max)); + vstrbq_s8(source, res); + source += 16; + cnt--; + } + + if (((length & 0xF) / 8) > 0) + { + const int16x8_t src = vldrbq_s16(source); + int16x8_t res = vmaxq_s16(src, vdupq_n_s16((int16_t)act_min)); + res = vminq_s16(src, vdupq_n_s16((int16_t)act_max)); + vstrbq_s16(source, res); + source += 8; + } + + cnt = length & 7; + while (cnt > 0) + { + int32_t comp = *source; + comp = MAX(comp, act_min); + comp = MIN(comp, act_max); + *source++ = (int8_t)comp; + cnt--; + } + +#else + union arm_nnword in; + int32_t cnt = length >> 2; + + while (cnt > 0l) + { + in.word = arm_nn_read_q7x4(source); + + in.bytes[0] = MAX(in.bytes[0], act_min); + in.bytes[0] = MIN(in.bytes[0], act_max); + in.bytes[1] = MAX(in.bytes[1], act_min); + in.bytes[1] = MIN(in.bytes[1], act_max); + in.bytes[2] = MAX(in.bytes[2], act_min); + in.bytes[2] = MIN(in.bytes[2], act_max); + in.bytes[3] = MAX(in.bytes[3], act_min); + in.bytes[3] = MIN(in.bytes[3], act_max); + + write_q7x4_ia(&source, in.word); + cnt--; + } + + cnt = length & 0x3; + while (cnt > 0l) + { + int32_t comp = *source; + comp = MAX(comp, act_min); + comp = MIN(comp, act_max); + *source++ = (int8_t)comp; + cnt--; + } +#endif +} +#endif + +/** + * @ingroup 
groupNN + */ + +/** + * @addtogroup Pooling + * @{ + */ + +/* + * Optimized s8 max pooling function + * + * Refer to header file for details. + * + */ + +arm_status arm_max_pool_s8_opt(const uint16_t input_y, + const uint16_t input_x, + const uint16_t output_y, + const uint16_t output_x, + const uint16_t stride_y, + const uint16_t stride_x, + const uint16_t kernel_y, + const uint16_t kernel_x, + const uint16_t pad_y, + const uint16_t pad_x, + const int8_t act_min, + const int8_t act_max, + const uint16_t depth, + int8_t *src, + int16_t *tmp_buffer, + int8_t *dst) +{ + +#if defined(ARM_MATH_DSP) + + /* Run the following code for Cortex-M4 and Cortex-M7 */ + (void)tmp_buffer; + int32_t i_x, i_y; + + /* first does the pooling along x axis */ + for (i_y = 0; i_y < input_y; i_y++) + { + + for (i_x = 0; i_x < output_x; i_x++) + { + /* for each output sample */ + q7_t *target = src + (i_y * input_x + i_x) * depth; + q7_t *win_start; + q7_t *win_stop; + const int32_t x_origin = i_x * stride_x - pad_x; + if (x_origin < 0) + { + win_start = target; + } + else + { + win_start = src + (i_y * input_x + x_origin) * depth; + } + + if (x_origin + kernel_x >= input_x) + { + win_stop = src + (i_y * input_x + input_x) * depth; + } + else + { + win_stop = src + (i_y * input_x + x_origin + kernel_x) * depth; + } + + /* first step is to copy over initial data(along channel) along the channel in x direction */ + memmove(target, win_start, depth); + + /* Move over to next element along x axis and compare with the base(target) */ + win_start += depth; + for (; win_start < win_stop; win_start += depth) + { + compare_and_replace_if_larger_q7(target, win_start, depth); + } + } + } + + /* then does the pooling along y axis */ + for (i_y = 0; i_y < output_y; i_y++) + { + /* for each output row */ + q7_t *target = dst + i_y * output_x * depth; + q7_t *row_start; + q7_t *row_end; + const int32_t y_origin = i_y * stride_y - pad_y; + /* setting the starting row */ + if (y_origin < 0) + { + row_start = src; + } + else + { + row_start = src + y_origin * input_x * depth; + } + /* setting the stopping row */ + if (y_origin + kernel_y >= input_y) + { + row_end = src + input_y * input_x * depth; + } + else + { + row_end = src + (y_origin + kernel_y) * input_x * depth; + } + + /* copy over the complete first row. */ + memmove(target, row_start, output_x * depth); + + /* move over to next row and compare with the base row (target)*/ + row_start += depth * input_x; + + for (; row_start < row_end; row_start += input_x * depth) + { + compare_and_replace_if_larger_q7(target, row_start, output_x * depth); + } + } + + clamp_output(dst, output_x * output_y * depth, act_min, act_max); + +#else + /* Pure C implementation */ + arm_max_pool_s8(input_y, + input_x, + output_y, + output_x, + stride_y, + stride_x, + kernel_y, + kernel_x, + pad_y, + pad_x, + act_min, + act_max, + depth, + src, + tmp_buffer, + dst); +#endif + return ARM_MATH_SUCCESS; +} + +/** + * @} end of Pooling group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_pool_q7_HWC.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_pool_q7_HWC.c new file mode 100755 index 00000000000..14e0ef7a24c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/CMSIS/NN/PoolingFunctions/arm_pool_q7_HWC.c @@ -0,0 +1,454 @@ +/* + * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_pool_q7_HWC.c + * Description: Pooling function implementations + * + * $Date: 17. January 2018 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "arm_math.h" +#include "arm_nnfunctions.h" + +#if defined (ARM_MATH_DSP) + +/** + * @brief A few utility functions used by pooling functions + * + * + */ + +static void buffer_scale_back_q15_to_q7(q15_t * buffer, q7_t * target, uint16_t length, uint16_t scale) +{ + int i; + + for (i = 0; i < length; i++) + { + target[i] = (q7_t) (buffer[i] / scale); + } +} + +static void compare_and_replace_if_larger_q7(q7_t * base, // base data + const q7_t * target, // compare target + const uint16_t length // data length + ) +{ + q7_t *pIn = base; + const q7_t *pCom = target; + union arm_nnword in; + union arm_nnword com; + uint16_t cnt = length >> 2; + + while (cnt > 0u) + { + in.word = arm_nn_read_q7x4((const q7_t*)pIn); + com.word = arm_nn_read_q7x4_ia((const q7_t**)&pCom); + + // if version + if (com.bytes[0] > in.bytes[0]) + in.bytes[0] = com.bytes[0]; + if (com.bytes[1] > in.bytes[1]) + in.bytes[1] = com.bytes[1]; + if (com.bytes[2] > in.bytes[2]) + in.bytes[2] = com.bytes[2]; + if (com.bytes[3] > in.bytes[3]) + in.bytes[3] = com.bytes[3]; + + *__SIMD32(pIn)++ = in.word; + + cnt--; + } + + cnt = length & 0x3; + while (cnt > 0u) + { + if (*pCom > *pIn) + { + *pIn = *pCom; + } + pIn++; + pCom++; + cnt--; + } +} + +static void accumulate_q7_to_q15(q15_t * base, q7_t * target, const uint16_t length) +{ + q15_t *pCnt = base; + q7_t *pV = target; + q31_t v1, v2, vo1, vo2; + uint16_t cnt = length >> 2; + q31_t in; + + while (cnt > 0u) + { + q31_t value = arm_nn_read_q7x4_ia((const q7_t**)&pV); + v1 = __SXTB16(__ROR(value, 8)); + v2 = __SXTB16(value); +#ifndef ARM_MATH_BIG_ENDIAN + + vo2 = __PKHTB(v1, v2, 16); + vo1 = __PKHBT(v2, v1, 16); + +#else + + vo1 = __PKHTB(v1, v2, 16); + vo2 = __PKHBT(v2, v1, 16); + +#endif + + in = arm_nn_read_q15x2(pCnt); + *__SIMD32(pCnt)++ = __QADD16(vo1, in); + + in = arm_nn_read_q15x2(pCnt); + *__SIMD32(pCnt)++ = __QADD16(vo2, in); + + cnt--; + } + cnt = length & 0x3; + while (cnt > 0u) + { + *pCnt++ += *pV++; + cnt--; + } +} + +#endif // ARM_MATH_DSP + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup Pooling + * @{ + */ + + /** + * @brief Q7 max pooling function + * @param[in, out] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number of input tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA Not used + * @param[in,out] Im_out pointer to output tensor + * + * @details + * + * The pooling 
function is implemented as split x-pooling then + * y-pooling. + * + * This pooling function is input-destructive. Input data is undefined + * after calling this function. + * + */ + +void +arm_maxpool_q7_HWC(q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out) +{ + (void)bufferA; +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + int16_t i_x, i_y; + + /* first does the pooling along x axis */ + for (i_y = 0; i_y < dim_im_in; i_y++) + { + + for (i_x = 0; i_x < dim_im_out; i_x++) + { + /* for each output pixel */ + q7_t *target = Im_in + (i_y * dim_im_in + i_x) * ch_im_in; + q7_t *win_start; + q7_t *win_stop; + if (i_x * stride - padding < 0) + { + win_start = target; + } else + { + win_start = Im_in + (i_y * dim_im_in + i_x * stride - padding) * ch_im_in; + } + + if (i_x * stride - padding + dim_kernel >= dim_im_in) + { + win_stop = Im_in + (i_y * dim_im_in + dim_im_in) * ch_im_in; + } else + { + win_stop = Im_in + (i_y * dim_im_in + i_x * stride - padding + dim_kernel) * ch_im_in; + } + + /* first step is to copy over initial data */ + /* arm_copy_q7(win_start, target, ch_im_in); */ + memmove(target, win_start, ch_im_in); + + /* start the max operation from the second part */ + win_start += ch_im_in; + for (; win_start < win_stop; win_start += ch_im_in) + { + compare_and_replace_if_larger_q7(target, win_start, ch_im_in); + } + } + } + + /* then does the pooling along y axis */ + for (i_y = 0; i_y < dim_im_out; i_y++) + { + + /* for each output row */ + q7_t *target = Im_out + i_y * dim_im_out * ch_im_in; + q7_t *row_start; + q7_t *row_end; + /* setting the starting row */ + if (i_y * stride - padding < 0) + { + row_start = Im_in; + } else + { + row_start = Im_in + (i_y * stride - padding) * dim_im_in * ch_im_in; + } + /* setting the stopping row */ + if (i_y * stride - padding + dim_kernel >= dim_im_in) + { + row_end = Im_in + dim_im_in * dim_im_in * ch_im_in; + } else + { + row_end = Im_in + (i_y * stride - padding + dim_kernel) * dim_im_in * ch_im_in; + } + + /* copy over the first row */ + /* arm_copy_q7(row_start, target, dim_im_out * ch_im_in); */ + memmove(target, row_start, dim_im_out * ch_im_in); + + /* move over to next row */ + row_start += ch_im_in * dim_im_in; + + for (; row_start < row_end; row_start += dim_im_in * ch_im_in) + { + compare_and_replace_if_larger_q7(target, row_start, dim_im_out * ch_im_in); + } + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + int16_t i_ch_in, i_x, i_y; + int16_t k_x, k_y; + + for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++) + { + for (i_y = 0; i_y < dim_im_out; i_y++) + { + for (i_x = 0; i_x < dim_im_out; i_x++) + { + int max = -129; + for (k_y = i_y * stride - padding; k_y < i_y * stride - padding + dim_kernel; k_y++) + { + for (k_x = i_x * stride - padding; k_x < i_x * stride - padding + dim_kernel; k_x++) + { + if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in && k_x < dim_im_in) + { + if (Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)] > max) + { + max = Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)]; + } + } + } + } + Im_out[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out)] = max; + } + } + } + +#endif /* ARM_MATH_DSP */ + +} + + /** + * @brief Q7 average pooling function + * @param[in,out] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimention + * @param[in] ch_im_in number 
of input tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] Im_out pointer to output tensor + * + * @details + * + * Buffer size: + * + * bufferA size: 2*dim_im_out*ch_im_in + * + * The pooling function is implemented as split x-pooling then + * y-pooling. + * + * This pooling function is input-destructive. Input data is undefined + * after calling this function. + * + */ + +void +arm_avepool_q7_HWC(q7_t * Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out) +{ + +#if defined (ARM_MATH_DSP) + /* Run the following code for Cortex-M4 and Cortex-M7 */ + + q15_t *buffer = (q15_t *) bufferA; + int16_t i_x, i_y; + int16_t count = 0; + + /* first does the pooling along x axis */ + for (i_y = 0; i_y < dim_im_in; i_y++) + { + + for (i_x = 0; i_x < dim_im_out; i_x++) + { + /* for each output pixel */ + q7_t *target = Im_in + (i_y * dim_im_in + i_x) * ch_im_in; + q7_t *win_start; + q7_t *win_stop; + if (i_x * stride - padding < 0) + { + win_start = target; + } else + { + win_start = Im_in + (i_y * dim_im_in + i_x * stride - padding) * ch_im_in; + } + + if (i_x * stride - padding + dim_kernel >= dim_im_in) + { + win_stop = Im_in + (i_y * dim_im_in + dim_im_in) * ch_im_in; + } else + { + win_stop = Im_in + (i_y * dim_im_in + i_x * stride - padding + dim_kernel) * ch_im_in; + } + + /* first step is to copy over initial data */ + arm_q7_to_q15_no_shift(win_start, buffer, ch_im_in); + count = 1; + + /* start the max operation from the second part */ + win_start += ch_im_in; + for (; win_start < win_stop; win_start += ch_im_in) + { + accumulate_q7_to_q15(buffer, win_start, ch_im_in); + count++; + } + buffer_scale_back_q15_to_q7(buffer, target, ch_im_in, count); + } + } + + /* then does the pooling along y axis */ + for (i_y = 0; i_y < dim_im_out; i_y++) + { + /* for each output row */ + q7_t *target = Im_out + i_y * dim_im_out * ch_im_in; + q7_t *row_start; + q7_t *row_end; + /* setting the starting row */ + if (i_y * stride - padding < 0) + { + row_start = Im_in; + } else + { + row_start = Im_in + (i_y * stride - padding) * dim_im_in * ch_im_in; + } + /* setting the stopping row */ + if (i_y * stride - padding + dim_kernel >= dim_im_in) + { + row_end = Im_in + dim_im_in * dim_im_in * ch_im_in; + } else + { + row_end = Im_in + (i_y * stride - padding + dim_kernel) * dim_im_in * ch_im_in; + } + + /* copy over the first row */ + arm_q7_to_q15_no_shift(row_start, buffer, dim_im_out * ch_im_in); + count = 1; + + /* move over to next row */ + row_start += ch_im_in * dim_im_in; + + for (; row_start < row_end; row_start += dim_im_in * ch_im_in) + { + accumulate_q7_to_q15(buffer, row_start, dim_im_out * ch_im_in); + count++; + } + buffer_scale_back_q15_to_q7(buffer, target, dim_im_out * ch_im_in, count); + } + +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + + (void)bufferA; + int16_t i_ch_in, i_x, i_y; + int16_t k_x, k_y; + + for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++) + { + for (i_y = 0; i_y < dim_im_out; i_y++) + { + for (i_x = 0; i_x < dim_im_out; i_x++) + { + int sum = 0; + int count = 0; + for (k_y = i_y * stride - padding; k_y < i_y * stride - padding + dim_kernel; k_y++) + { + for (k_x = i_x * 
stride - padding; k_x < i_x * stride - padding + dim_kernel; k_x++) + { + if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in && k_x < dim_im_in) + { + sum += Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)]; + count++; + } + } + } + Im_out[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out)] = sum / count; + } + } + } + +#endif /* ARM_MATH_DSP */ + +} + +/** + * @} end of Pooling group + */ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/exp_fp32.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/exp_fp32.c new file mode 100755 index 00000000000..2d8fa3579df --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/exp_fp32.c @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/fp32/exp_fp32.h" +#include +#include +#include "nnacl/errorcode.h" + +int Exp(const float *input_data, float *output_data, const ExpParameter *parameter, int task_id) { + if (parameter->scale_ == 1) { + for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) { + output_data[i] = expf(input_data[i]); + } + } else { + for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) { + output_data[i] = expf(input_data[i] * parameter->in_scale_); + } + } + if (parameter->out_scale_ != 1) { + for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) { + output_data[i] = output_data[i] * parameter->out_scale_; + } + } + return NNACL_OK; +} + +void ExpFp32(const float *src, float *dst, int num) { + int i = 0; +#ifdef ENABLE_ARM64 + int count = (num / C4NUM) * C4NUM; + for (; i < count; i += C4NUM) { + simd_exp(vld1q_f32(src + i), dst + i); + } +#endif + for (; i < num; ++i) { + single_exp(src[i], dst + i); + } +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/softmax_fp32.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/softmax_fp32.c new file mode 100755 index 00000000000..943eaa15e2c --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/fp32/softmax_fp32.c @@ -0,0 +1,146 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "nnacl/fp32/softmax_fp32.h" +#include +#include +#include "nnacl/fp32/exp_fp32.h" + +void SoftmaxNorm(const float *src, float *dst, int batch, int channel) { + int cur_batch_offset = 0; + for (int i = 0; i < batch; i++, cur_batch_offset += channel) { + int j = 0; +#ifdef ENABLE_NEON + float32x4_t max4 = vdupq_n_f32(-FLT_MAX); + int count = (channel / C4NUM) * C4NUM; + for (; j < count; j += C4NUM) { + float32x4_t input4 = vld1q_f32(src + cur_batch_offset + j); + max4 = vmaxq_f32(max4, input4); + } +#ifdef ENABLE_ARM64 + float max = vmaxvq_f32(max4); +#else + float max = max4[0]; + for (int m = 1; m < 4; ++m) { + max = MSMAX(max, max4[m]); + } +#endif +#else + float max = -FLT_MAX; +#endif + for (; j < channel; j++) { + float input = src[cur_batch_offset + j]; + if (input > max) { + max = input; + } + } + int k = 0; +#ifdef ENABLE_NEON + int count2 = (channel / C4NUM) * C4NUM; + for (; k < count2; k += C4NUM) { + float32x4_t input4 = vld1q_f32(src + cur_batch_offset + k); + float32x4_t output4 = vsubq_f32(input4, vdupq_n_f32(max)); + vst1q_f32(dst + cur_batch_offset + k, output4); + } +#endif + for (; k < channel; k++) { + int offset = cur_batch_offset + k; + dst[offset] = src[offset] - max; + } + } +} + +void SumAndDiv(const float *src, float *dst, int batch, int channel) { + int cur_batch_offset = 0; + for (int i = 0; i < batch; i++, cur_batch_offset += channel) { + float sum = 0; + int j = 0; +#ifdef ENABLE_NEON + float32x4_t sum4 = vdupq_n_f32(0); + int count = (channel / C4NUM) * C4NUM; + for (; j < count; j += C4NUM) { + sum4 = vaddq_f32(sum4, vld1q_f32(src + cur_batch_offset + j)); + } +#ifdef ENABLE_ARM64 + sum = vaddvq_f32(sum4); +#else + sum = sum4[0] + sum4[1] + sum4[2] + sum4[3]; +#endif +#endif + for (; j < channel; j++) { + sum += src[cur_batch_offset + j]; + } + int k = 0; +#ifdef ENABLE_NEON + const float div = 1.0f / sum; + for (; k < count; k += C4NUM) { + vst1q_f32(dst + cur_batch_offset + k, vmulq_n_f32(vld1q_f32(src + cur_batch_offset + k), div)); + } +#endif + for (; k < channel; k++) { + dst[cur_batch_offset + k] = src[cur_batch_offset + k] / sum; + } + } +} + +void SoftmaxLastAxis(const float *src, float *dst, int batch, int channel) { + SoftmaxNorm(src, dst, batch, channel); + ExpFp32(dst, dst, batch * channel); + SumAndDiv(dst, dst, batch, channel); +} + +// output = exp(input) / reduce_sum(exp(input), axis) +void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, const SoftmaxParameter *parameter) { + int axis = parameter->axis_; + int n_dim = parameter->n_dim_; + const int *input_shape = parameter->input_shape_; + int inner_size = 1; + int outter_size = 1; + + for (int i = 0; i < axis; i++) { + outter_size *= input_shape[i]; + } + for (int i = axis + 1; i < n_dim; i++) { + inner_size *= input_shape[i]; + } + for (int i = 0; i < outter_size; i++) { + int outter_offset = i * input_shape[axis] * inner_size; + int sum_outter_offset = i * inner_size; + for (int k = 0; k < inner_size; k++) { + int inner_offset = outter_offset + k; + float max_data = input_ptr[inner_offset]; + for (int j = 0; j < input_shape[axis]; j++) { + int axis_offset = inner_offset + j * inner_size; + max_data = max_data > input_ptr[axis_offset] ? 
max_data : input_ptr[axis_offset]; + } + for (int j = 0; j < input_shape[axis]; j++) { + int axis_offset = inner_offset + j * inner_size; + output_ptr[axis_offset] = exp(input_ptr[axis_offset] - max_data); + sum_data[k + sum_outter_offset] += output_ptr[axis_offset]; + } + } + } + for (int i = 0; i < outter_size; i++) { + int outter_offset = i * input_shape[axis] * inner_size; + int sum_outter_offset = i * inner_size; + for (int j = 0; j < input_shape[axis]; j++) { + int axis_offset = outter_offset + j * inner_size; + for (int k = 0; k < inner_size; k++) { + int inner_offset = axis_offset + k; + output_ptr[inner_offset] = output_ptr[inner_offset] / sum_data[k + sum_outter_offset]; + } + } + } +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/int8/quant_dtype_cast_int8.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/int8/quant_dtype_cast_int8.c new file mode 100755 index 00000000000..0ec6fc72f52 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/kernels/nnacl/int8/quant_dtype_cast_int8.c @@ -0,0 +1,125 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "nnacl/int8/quant_dtype_cast_int8.h" +#include "nnacl/errorcode.h" + +int DoDequantizeInt8ToFp32(const int8_t *quant_values, float *real_values, float scale, int32_t zp, int size) { + if (quant_values == NULL || real_values == NULL) { + return NNACL_PARAM_INVALID; + } + + for (int i = 0; i < size; ++i) { + real_values[i] = (quant_values[i] - zp) * scale; + } + return NNACL_OK; +} + +int DoQuantizeFp32ToInt8(const float *real_values, int8_t *quant_values, float scale, int32_t zp, int size, + bool uint8_flag) { + if (quant_values == NULL || real_values == NULL) { + return NNACL_PARAM_INVALID; + } + + if (uint8_flag) { + zp += 128; + } + const float inverse_scale = 1.0f / scale; + for (int i = 0; i < size; ++i) { + if (isinf(real_values[i])) { + quant_values[i] = 127; + } else { + int temp = round(real_values[i] * inverse_scale + zp); + if (uint8_flag) { + temp -= 128; + } + temp = temp < 127 ? temp : 127; + temp = temp > -128 ? 
temp : -128; + quant_values[i] = (int8_t)temp; + } + } + return NNACL_OK; +} + +int DoDequantizeUInt8ToFp32(const uint8_t *quant_values, float *real_values, float scale, int32_t zp, int size) { + if (quant_values == NULL || real_values == NULL) { + return NNACL_PARAM_INVALID; + } + + for (int i = 0; i < size; ++i) { + real_values[i] = (float)((int)quant_values[i] - zp) * scale; + } + return NNACL_OK; +} + +int DoQuantizeFp32ToUInt8(const float *real_values, uint8_t *quant_values, float scale, int32_t zp, int size) { + if (quant_values == NULL || real_values == NULL) { + return NNACL_PARAM_INVALID; + } + + for (int i = 0; i < size; ++i) { + if (isinf(real_values[i])) { + quant_values[i] = 255; + } else { + float temp = (float)round(real_values[i] * 1.0 / scale + zp); + if (temp > 255) { + quant_values[i] = 255; + } else if (temp < 0) { + quant_values[i] = 0; + } else { + quant_values[i] = (uint8_t)temp; + } + } + } + return NNACL_OK; +} + +int Int8ToUInt8(const int8_t *quant_values, uint8_t *real_values, int size) { + if (quant_values == NULL || real_values == NULL) { + return NNACL_PARAM_INVALID; + } + + for (int i = 0; i < size; ++i) { + int temp = quant_values[i] + 128; + if (temp > 255) { + real_values[i] = (uint8_t)255; + } else if (temp < 0) { + real_values[i] = 0; + } else { + real_values[i] = (uint8_t)temp; + } + } + return NNACL_OK; +} + +int UInt8ToInt8(const uint8_t *real_values, int8_t *quant_values, int size) { + if (quant_values == NULL || real_values == NULL) { + return NNACL_PARAM_INVALID; + } + + for (int i = 0; i < size; ++i) { + int temp = real_values[i] - 128; + if (temp > 127) { + quant_values[i] = 127; + } else if (temp < -128) { + quant_values[i] = -128; + } else { + quant_values[i] = (int8_t)temp; + } + } + return NNACL_OK; +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/src/string.cc b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/src/string.cc new file mode 100755 index 00000000000..4a519c9d076 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/operator_library/src/string.cc @@ -0,0 +1,288 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifdef NOT_USE_STL +#include +#include +#include +#include +#include +#include "include/lite_utils.h" + +namespace mindspore { +String::String() { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; +} + +String::String(size_t count, char ch) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * (count + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memset(buffer_, ch, count); + buffer_[count] = '\0'; + size_ = count; +} +String::String(const char *s, size_t count) { + if (s == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_t size_s = strlen(s); + if (size_s <= count) { + size_ = size_s; + } else { + size_ = count; + } + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, s, size_); + buffer_[size_] = '\0'; +} + +String::String(const char *s) { + if (s == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return; + } + size_ = strlen(s); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(buffer_, s, size_ + 1); +} + +String::String(const String &other) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * (other.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = other.size_; + memcpy(buffer_, other.buffer_, size_ + 1); +} + +String::String(const String &other, size_t pos, size_t count) { + if (pos >= other.size_) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + } else { + if (count == npos) { + count = other.size_ - pos; + } + if (pos + count > other.size_) { + size_ = other.size_ - pos; + } else { + size_ = count; + } + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + strncpy(buffer_, other.buffer_ + pos, size_); + buffer_[size_] = '\0'; + } +} + +String::~String() { free(buffer_); } + +String &String::operator=(const String &str) { + if (this == &str) { + return *this; + } + free(buffer_); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (str.size_ + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = str.size_; + memcpy(buffer_, str.buffer_, size_ + 1); + return *this; +} + +String &String::operator=(const char *str) { + free(buffer_); + if (str == nullptr) { + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; + return *this; + } + size_t size_s = strlen(str); + buffer_ = reinterpret_cast(malloc(sizeof(char) * (size_s + 1))); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + size_ = size_s; + memcpy(buffer_, str, size_ + 1); + return *this; +} + +char &String::at(size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return buffer_[pos]; +} +const char &String::at(size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of 
range"); + } + return buffer_[pos]; +} +char &String::operator[](size_t pos) { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); +} +const char &String::operator[](size_t pos) const { + if (pos >= size_) { + MS_C_EXCEPTION("pos out of range"); + } + return this->at(pos); +} +char *String::data() noexcept { return buffer_; }; +const char *String::data() const noexcept { return buffer_; } +const char *String::c_str() const noexcept { return buffer_; } + +// capacity +bool String::empty() const noexcept { return size_ == 0; } +size_t String::size() const noexcept { return size_; } +size_t String::length() const noexcept { return size_; } + +// operations +void String::clear() noexcept { + free(buffer_); + buffer_ = reinterpret_cast(malloc(sizeof(char) * 1)); + if (buffer_ == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + buffer_[0] = '\0'; + size_ = 0; +} + +String &String::operator+(const String &str) { + (*this) += str; + return *this; +} + +String &String::operator+=(const String &str) { + size_t new_size = size_ + str.size_; + char *tmp = reinterpret_cast(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str.buffer_, str.size_); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; +} + +String &String::operator+=(const char *str) { + if (str == nullptr) { + return *this; + } + size_t str_size = strlen(str); + size_t new_size = size_ + str_size; + char *tmp = reinterpret_cast(malloc(sizeof(char) * (new_size + 1))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + strncat(tmp, str, strlen(str)); + tmp[new_size] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ = new_size; + return *this; +} + +String &String::operator+=(const char ch) { + char *tmp = reinterpret_cast(malloc(sizeof(char) * (size_ + 2))); + if (tmp == nullptr) { + MS_C_EXCEPTION("malloc data failed"); + } + memcpy(tmp, this->buffer_, size_ + 1); + tmp[size_] = ch; + tmp[size_ + 1] = '\0'; + free(buffer_); + buffer_ = tmp; + size_ += 1; + return *this; +} + +String &String::append(size_t count, const char ch) { + (*this) += ch; + return *this; +} +String &String::append(const String &str) { + (*this) += str; + return *this; +} +String &String::append(const char *str) { + if (str == nullptr) { + return *this; + } + (*this) += str; + return *this; +} + +int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); } +int String::compare(const char *str) const { return strcmp(buffer_, str); } + +String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); } + +String to_String(int32_t value) { + char tmp[sizeof(int32_t) * 4]; + snprintf(tmp, sizeof(int32_t) * 4, "%d", value); + return String(tmp, strlen(tmp)); +} + +String to_String(float value) { + char tmp[FLT_MAX_10_EXP + 20]; + snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value); + return String(tmp, strlen(tmp)); +} +} // namespace mindspore +#endif // NOT_USE_STL diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/CMakeLists.txt b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/CMakeLists.txt new file mode 100755 index 00000000000..52243bdcfc2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/CMakeLists.txt @@ -0,0 +1,85 @@ + + +cmake_minimum_required(VERSION 3.14) +project(net) + +if(NOT DEFINED PKG_PATH) + 
message(FATAL_ERROR "PKG_PATH not set") +endif() + +get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + +set(OP_LIB ${PKG_PATH}/tools/codegen/operator_library/lib/libops.a) +set(OP_HEADER_PATH ${PKG_PATH}/tools/codegen/operator_library/include) +set(HEADER_PATH ${PKG_PATH}/inference) + +message("operator lib path: ${OP_LIB}") +message("operator header path: ${OP_HEADER_PATH}") + +add_compile_definitions(NOT_USE_STL) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include) +include_directories(${OP_HEADER_PATH}) +include_directories(${HEADER_PATH}) + +include(net.cmake) + +option(MICRO_BUILD_ARM64 "build android arm64" OFF) +option(MICRO_BUILD_ARM32A "build android arm32" OFF) + +if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_NEON) + add_compile_definitions(ENABLE_ARM) +endif() + +if(MICRO_BUILD_ARM64) + add_compile_definitions(ENABLE_ARM64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod") +endif() + +if(MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_ARM32) + add_definitions(-mfloat-abi=softfp -mfpu=neon) +endif() + +set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") +else() + set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +endif() + +function(create_library) + add_custom_command(TARGET net + POST_BUILD + COMMAND rm -rf tmp + COMMAND mkdir tmp + COMMAND cd tmp && ar -x ${OP_LIB} + COMMAND echo "raw static library ${library_name} size:" + COMMAND ls -lh ${library_name} + COMMAND mv ${library_name} ./tmp && cd tmp && ar -x ${library_name} + COMMENT "unzip raw static library ${library_name}" + ) + foreach(object_file ${OP_SRC}) + add_custom_command(TARGET net POST_BUILD COMMAND mv ./tmp/${object_file} .) + endforeach() + add_custom_command(TARGET net + POST_BUILD + COMMAND ar cr ${library_name} *.o + COMMAND ranlib ${library_name} + COMMAND echo "new static library ${library_name} size:" + COMMAND ls -lh ${library_name} + COMMAND rm -rf tmp && rm -rf *.o + COMMENT "generate specified static library ${library_name}" + ) +endfunction(create_library) +string(CONCAT library_name "lib" net ".a") +create_library() + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/mnist_input_data.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/mnist_input_data.h new file mode 100755 index 00000000000..deff66a545e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/mnist_input_data.h @@ -0,0 +1,200 @@ +// +// Created by Big_CoCo on 2021/3/22. 
+// +float mnist_inputs_data[784] = { + -1.5255959e+00, -7.5023180e-01, -6.5398091e-01, -1.6094848e+00, + -1.0016718e-01, -6.0918891e-01, -9.7977227e-01, -1.6090963e+00, + -7.1214461e-01, 3.0372199e-01, -7.7731431e-01, -2.5145525e-01, + -2.2227049e-01, 1.6871134e+00, 2.2842517e-01, 4.6763551e-01, + -6.9697243e-01, -1.1607615e+00, 6.9954240e-01, 1.9908163e-01, + 8.6569238e-01, 2.4440390e-01, -6.6291136e-01, 8.0730826e-01, + 1.1016806e+00, -1.7593604e-01, -2.2455578e+00, -1.4464580e+00, + 6.1155282e-02, -6.1774445e-01, -7.9806983e-01, -1.3162321e-01, + 1.8793458e+00, -7.2131783e-02, 1.5777060e-01, -7.7345490e-01, + 1.9905651e-01, 4.5702778e-02, 1.5295692e-01, -4.7567880e-01, + -1.1101983e-01, 2.9273525e-01, -1.5784515e-01, -2.8787140e-02, + 2.3571110e+00, -1.0373387e+00, 1.5747981e+00, -6.2984723e-01, + -9.2739171e-01, 5.4514152e-01, 6.6280261e-02, -4.3704012e-01, + 7.6260060e-01, 4.4151092e-01, 1.1651385e+00, 2.0153918e+00, + 1.3741246e-01, 9.3864471e-01, -1.8600109e-01, -6.4463931e-01, + 1.5392458e+00, -8.6958760e-01, -3.3311536e+00, -7.4787223e-01, + -2.5502462e-02, -1.0233306e+00, -5.9618515e-01, -1.0055307e+00, + -2.1060631e-01, -7.5475276e-03, 1.6734272e+00, 1.0342831e-02, + -7.0395666e-01, -1.8526579e-01, -9.9623507e-01, -8.3125526e-01, + -4.6102202e-01, -5.6008244e-01, 3.9557618e-01, -9.8227710e-01, + -5.0648659e-01, 9.9775404e-02, -6.5397340e-01, 7.3169369e-01, + -1.4343859e+00, -5.0081307e-01, 1.7163314e-01, -1.5999313e-01, + 2.5463349e-01, -5.0195730e-01, -1.0412000e+00, 7.3226720e-01, + -1.0483401e+00, -4.7087720e-01, 2.9113635e-01, 1.9907043e+00, + 6.6144532e-01, 1.1899205e+00, 8.1653392e-01, -9.1352361e-01, + 1.3851457e+00, -8.1384623e-01, -9.2757654e-01, 1.1119633e+00, + 1.3352057e+00, 6.0427362e-01, -1.0344208e-01, -1.5121692e-01, + -2.1020830e+00, -6.2002194e-01, -1.4782310e+00, -1.1334175e+00, + 8.7379628e-01, -5.6025940e-01, 1.2857845e+00, 8.1682384e-01, + 2.0530410e-01, 3.0510718e-01, 5.3568703e-01, -4.3118501e-01, + 2.5581384e+00, -2.3336388e-01, -1.3472130e-02, 1.8606348e+00, + -1.9804063e+00, 1.7985829e+00, 1.0181159e-01, 3.4000599e-01, + 7.1236455e-01, -1.7765073e+00, 3.5386458e-01, 1.1996132e+00, + -3.0299741e-01, -1.7618417e+00, 6.3484460e-01, -8.0435908e-01, + -1.6111118e+00, -1.8716129e+00, 5.4308361e-01, 6.6067863e-01, + 2.2952116e+00, 6.7490596e-01, 1.7133216e+00, -1.7942734e+00, + -1.3632672e+00, -9.8321962e-01, 1.5112667e+00, 6.4187074e-01, + 4.7296381e-01, -4.2859009e-01, 5.5137074e-01, -1.5473709e+00, + 5.1811212e-01, 1.0653535e-01, 2.6924077e-01, 1.3247679e+00, + 1.7460191e+00, 1.8549690e+00, -7.0636910e-01, 2.5570862e+00, + 4.1753429e-01, -2.1271861e-01, -8.3995801e-01, -4.2001787e-01, + -6.2403631e-01, -9.7729611e-01, 8.7484282e-01, 9.8728138e-01, + 3.0957633e-01, 1.5206900e+00, 1.2052339e+00, -1.8155910e+00, + -4.0346155e-01, -9.5914519e-01, -5.2077039e-03, -7.8863136e-02, + 8.4365427e-01, 1.1657013e+00, 5.2693218e-01, 1.6192533e+00, + -9.6397626e-01, 1.4152038e-01, -1.6366096e-01, -3.5822257e-01, + 1.7222793e+00, -3.0357561e-01, 2.3887420e-01, 1.3440012e+00, + 1.0322569e-01, 1.1003542e+00, -3.4168020e-01, 9.4733888e-01, + -5.6851596e-01, 8.3759618e-01, 1.7836607e+00, -1.9542466e-01, + 5.1491612e-01, -1.8474776e+00, -2.9167426e+00, -5.6732988e-01, + -5.4128021e-01, 8.9517403e-01, -8.8250703e-01, 5.3181124e-01, + -1.5457772e+00, -1.7329982e-01, 7.2824633e-01, 5.7061020e-02, + 9.0551722e-01, 1.0462948e+00, -5.2059698e-01, 1.3547838e+00, + 2.3519313e-01, 1.9142433e+00, 1.8364111e+00, 1.3245324e+00, + -9.6900916e-01, 1.2516364e+00, 1.2103242e+00, 
-5.2792060e-01, + 2.1856615e-01, -5.7430726e-01, 1.4571251e+00, 1.7709557e+00, + 1.6499138e+00, -4.3200457e-01, -2.7102691e-01, -1.4391626e+00, + 1.2470404e+00, 1.2738512e+00, 3.9094925e-01, 3.8721049e-01, + -7.9828717e-02, 3.4172431e-01, 9.4882733e-01, -1.3839359e+00, + 1.7240863e+00, -2.3647652e+00, -9.2949092e-01, 2.9362530e-01, + 2.1513203e-01, 9.3846369e-01, 1.4657077e+00, -5.5647439e-01, + -7.4484080e-01, -2.0215721e-01, -2.2966790e-01, 1.3313366e-03, + 3.7527591e-01, -5.8106792e-01, -5.7230884e-01, 1.0097175e+00, + -1.0564939e-01, -1.1796960e+00, -9.0779595e-02, 5.6311435e-01, + -1.2560141e+00, 8.9555502e-01, 1.6747737e-01, 7.5142086e-01, + 2.4142299e+00, 1.0205840e+00, -4.4048381e-01, -1.7341677e+00, + -1.2362250e+00, 1.5785813e+00, -1.1160507e+00, 7.6777023e-01, + -5.8820677e-01, 2.1188903e+00, -5.4219025e-01, -2.4592547e+00, + -1.1108288e+00, -1.1187209e+00, 7.5799555e-01, -4.9565765e-01, + -1.9700006e-01, -3.3396218e-02, 7.1929151e-01, 1.0644146e+00, + 8.3402544e-01, -1.9162164e+00, -3.4202927e-01, -6.6049206e-01, + 3.1508535e-01, 1.1422518e+00, 3.0550566e-01, -5.7888174e-01, + -2.3828252e-01, -1.3541743e+00, 2.6868939e-01, 1.1455697e-01, + -1.5562972e+00, -1.0757437e+00, -8.7519461e-01, -4.7281876e-01, + 9.9123681e-01, -5.8622282e-02, 1.1787646e+00, 6.2218499e-01, + 7.8785008e-01, 1.3685523e+00, -8.5068983e-01, 5.1260746e-01, + 1.0476325e+00, -3.1758463e-01, 1.3948506e-01, 2.3402624e+00, + -6.1160916e-01, 8.1602710e-01, 2.4772300e-01, -3.8672671e-01, + 1.9948451e-01, 7.9926956e-01, -2.6190341e-01, 1.5132962e-01, + 1.1981666e+00, -2.2832582e+00, -1.0129594e+00, -8.8789088e-01, + 6.5221924e-01, -8.7262028e-01, 3.5253752e-02, -3.3653030e-01, + 1.4023319e+00, 4.8412141e-01, -7.0304507e-01, -8.2676607e-01, + 7.7439600e-01, 6.9199395e-01, -1.0184799e+00, -8.0337167e-01, + -7.0711321e-01, 7.5211829e-01, -1.9208279e-02, 1.1033330e+00, + -6.0679215e-01, -5.2522349e-01, -5.6618774e-01, 6.6039857e-04, + 7.2245878e-01, 1.5263520e-01, 1.4495978e-01, -2.3442194e+00, + 3.6000299e-01, 4.6668175e-01, 1.2830665e+00, 1.2678007e+00, + 1.9883296e-01, 5.4408771e-01, -3.9781693e-01, -1.9291055e+00, + 2.3236869e-01, 8.6146563e-01, 6.2175733e-01, -1.7811896e+00, + -7.8206092e-01, -1.4236701e+00, 1.6090765e+00, -3.2787595e-02, + 8.5323340e-01, 5.5063650e-02, -1.7425371e+00, 8.7500376e-01, + -2.7188172e+00, -2.2192061e-01, 3.4208494e-01, 1.1093477e+00, + -5.7314759e-01, 9.5778459e-01, 9.8202319e-04, -1.3847686e+00, + -9.9650228e-01, 8.0734813e-01, 1.1738863e+00, -9.3984646e-01, + 1.3109189e+00, -3.1670693e-01, -1.8610410e-01, -5.7646018e-01, + 6.8665183e-01, 4.2086706e-01, -1.0213808e+00, 9.8856664e-01, + -5.6187165e-01, -1.5792575e-01, 1.5042593e+00, -1.3950295e+00, + 8.0079097e-01, -6.6194439e-01, 1.2563107e+00, 4.9999446e-01, + -2.7133808e-01, 1.8469073e+00, -3.1249959e-02, -9.3872704e-02, + -6.1907429e-01, -6.3632655e-01, -4.2415860e-01, -2.0271668e+00, + 4.0962908e-01, -1.5421267e+00, -1.0128618e+00, -2.9737514e-02, + -2.8895226e-01, 1.5219319e-01, -2.9803404e-01, -1.3135384e-01, + -6.2809873e-01, 1.1968799e+00, 6.1099350e-01, -4.5477438e-01, + -9.6037018e-01, 2.7690458e-01, -6.8010890e-01, -5.4578751e-01, + -4.5518342e-01, 3.1859580e-01, -3.5494208e-01, 6.8589437e-01, + -3.7613729e-01, -2.4106996e+00, -1.2778088e+00, -6.2887415e-02, + -9.4712764e-02, -2.3144305e+00, 5.5653399e-01, 5.0569206e-01, + -2.0759584e-01, 6.9363183e-01, 4.1949040e-01, 2.2523544e+00, + 9.3852311e-01, 1.4252927e+00, 1.5083258e+00, 1.0539497e-01, + -1.6049961e+00, -1.0644839e-01, 2.4656655e-01, 6.1250836e-01, + 7.3980182e-01, 
-1.7860015e-01, 7.8490011e-02, -4.3981805e-01, + -3.6079338e-01, -1.2617406e+00, 1.9146918e+00, -1.8612741e+00, + -9.6749123e-03, 2.6038763e-01, 2.8203353e-01, 2.5829947e-01, + -4.2654869e-01, 9.8075122e-01, 1.8588890e+00, -1.0920147e+00, + 7.6300204e-01, 2.2761525e-01, -1.4569789e+00, 1.7043737e+00, + -3.2686386e+00, 4.7498712e-01, -2.1142473e+00, -1.5002301e+00, + 1.0692973e+00, 1.4393831e+00, 5.0645941e-01, 8.3597529e-01, + 1.1752968e+00, -3.4211743e-01, -3.8716367e-01, 5.4765379e-01, + -1.5891987e-01, -7.3604894e-01, -2.3351878e-01, -5.4039150e-01, + 1.5708433e-01, -5.9762299e-01, -8.8390934e-01, 6.0767305e-01, + -3.8843614e-01, -3.1578582e-02, -5.6058836e-01, -6.5552413e-01, + 7.2615027e-01, 6.7892069e-01, -4.3017429e-01, -3.8485083e-01, + -1.5082921e+00, -7.1995616e-01, -1.1909670e+00, 1.3271062e+00, + -2.1984124e+00, 2.8614265e-01, -2.0104712e-01, -2.5348804e+00, + -1.5848289e+00, 2.1679449e-01, -1.4276333e-01, 1.4274154e+00, + 1.6425379e-01, -3.1606898e-01, 1.2852281e-01, -5.2765143e-01, + 1.0834497e+00, 7.2746372e-01, 5.7725620e-01, 5.3688127e-01, + -4.3616110e-01, 2.7676934e-01, 2.9459488e-01, -5.6314898e-01, + 5.1899290e-01, 1.3394899e+00, -2.3876244e-01, -6.7961216e-02, + -1.5035529e-01, 5.2330041e-01, -2.1156418e-01, -1.2541972e+00, + 1.8176029e-02, 1.4141930e+00, -1.7437581e+00, 1.1289321e-01, + 4.5267120e-01, 3.1554270e-01, -6.9010293e-01, -2.8289640e-01, + 3.5618150e-01, -6.5616649e-01, 6.7499673e-01, 1.2909728e+00, + 2.8768075e-01, 1.1313233e+00, -1.9227705e-03, -2.3545134e-01, + -7.7834469e-01, 1.7674841e-02, 1.1869689e+00, -5.9568787e-01, + -1.5738513e+00, 9.0094990e-01, 1.0499262e+00, 4.2925611e-01, + 3.4665063e-01, 1.1960464e+00, 5.0744399e-02, -2.4047236e+00, + 6.6365647e-01, -3.9687249e-01, 4.0486488e-01, 3.4154087e-01, + -5.9558362e-01, 1.1019011e+00, 5.5386519e-01, -9.5087808e-01, + -5.0393552e-01, 1.7358937e+00, 1.1365190e+00, 7.3528785e-01, + -6.3713288e-01, -8.8953024e-01, 5.9735751e-01, -6.1928016e-01, + 1.2089928e+00, 8.0966818e-01, -3.7273017e-01, -5.3331411e-01, + -4.9985203e-01, 3.9947726e-02, -7.8146380e-01, 3.1946027e-01, + 8.2106584e-01, 8.6431539e-01, 4.9166805e-01, 4.4538009e-01, + -8.8726664e-01, 5.2979738e-01, 2.6839951e-01, 3.5011527e-01, + -2.7225810e-01, 1.0665658e+00, -8.9532214e-01, 1.4147978e+00, + -9.1728181e-01, 8.3720893e-01, 1.4950181e+00, -8.3034581e-01, + -1.9900607e+00, -8.7786657e-01, 2.2035673e-01, -1.9547749e+00, + 8.5329479e-01, -1.4188342e+00, 9.8297036e-01, -5.3868419e-01, + 1.3784917e-01, 9.2474985e-01, 2.9384881e-01, 3.0301414e+00, + -1.4259109e+00, 3.3642095e-01, -6.0710046e-02, -2.7827954e+00, + 1.3488874e+00, 2.6844734e-01, -1.1277022e+00, -5.9944046e-01, + -2.7945054e-01, -2.1999671e-01, 1.1315615e+00, -5.5813056e-01, + -8.4985018e-01, -5.9133893e-01, 9.1871524e-01, -1.7054160e+00, + -6.2452555e-01, -1.5477768e+00, -4.3917063e-01, -8.2900178e-01, + -4.2779538e-01, 1.2994735e+00, -1.0199753e+00, -8.5336286e-01, + -1.8470149e+00, -5.6316632e-01, -2.9311785e-01, -1.5726203e+00, + -1.0079967e+00, -1.1254747e+00, 2.0839548e+00, 2.8445369e-01, + -2.0898786e-01, 2.7948596e+00, 9.4693983e-01, 1.1613066e+00, + 2.1592824e-02, 2.1849406e+00, 3.7046966e-01, 8.3229375e-01, + 1.0294781e+00, -4.6743554e-01, 1.2099822e+00, -9.2927051e-01, + 1.5964565e+00, -3.5177864e-02, 1.9276363e-01, 9.4458717e-01, + 4.0307879e-01, 7.8339100e-01, 1.6240975e+00, -1.9683785e+00, + 9.2987645e-01, 1.5981036e+00, 4.2616895e-01, 2.5072601e+00, + 4.4090030e-01, -2.0394561e+00, 1.0628663e+00, 7.7601296e-01, + 8.3457164e-02, 1.7073935e+00, -2.0758156e-01, 
-2.7201766e-01, + -6.5246433e-01, 2.3190866e+00, -3.1556660e-01, 1.2293459e+00, + 1.9086858e-02, 1.6939967e+00, -9.7426087e-01, 1.0000985e-01, + 1.6331865e-01, 1.1104544e+00, 6.5858930e-01, -1.8446711e-01, + -6.9782162e-01, 5.4673910e-01, -1.0919048e+00, -2.0058967e-01, + -2.1976221e-01, -7.5056171e-01, 9.1047740e-01, 1.4996040e+00, + -2.7725294e-01, 9.9202655e-02, -1.5756993e+00, 7.4856669e-01, + -2.4229655e-01, -1.8000333e-01, 9.5837879e-01, 3.7814003e-01, + 1.9289158e-01, 2.4711327e-01, -3.1152922e-01, 4.4534847e-02, + -7.7046400e-01, 4.5658717e-01, -1.3150460e+00, -5.0721991e-01, + 4.1748023e-01, 9.2643857e-01, 6.3569260e-01, -1.6128796e-01, + 1.0286627e+00, 4.7581047e-02, 4.1486391e-01, -2.7009306e+00, + -1.5045499e+00, -1.8634710e-01, -9.3207240e-01, 3.0545831e-01, + -5.1035285e-01, 8.7927073e-01, 1.7738712e+00, -1.3286506e-01, + 1.3458737e+00, -4.6432903e-01, -3.7430039e-01, 9.7058731e-01, + -1.9518436e+00, -6.4998013e-01, 1.3482264e+00, 3.0995172e-01, + -1.5216483e+00, 9.7610706e-01, 3.9083481e-01, 2.7913565e-02, + -4.1744223e-01, 1.7064806e+00, -2.5080970e-01, -3.3612009e-02, + 5.8338016e-01, 1.6178854e+00, -1.3733586e+00, -8.5550433e-01, + 1.5778065e+00, 1.0752751e-01, 1.1045673e+00, 5.9758538e-01, + 7.1269102e-02, -5.0374931e-01, 8.0341589e-01, 1.1834451e+00, + 6.3811505e-01, -5.0269210e-01, -9.9724096e-01, -5.6425828e-01, + -3.4610125e-01, 2.7074468e-01, -1.3578615e+00, -9.6113062e-01, + 1.1768451e+00, 1.1981529e-01, 6.6130060e-01, 1.7996032e+00, + -1.4726470e+00, -1.4529139e+00, 2.5632006e-01, -7.5283742e-01, + 1.2143371e+00, 5.3680718e-01, -5.9180927e-01, 1.1358957e+00, + 1.4462845e+00, -1.1436753e+00, 7.8876835e-01, -6.7686230e-01, + -9.3259799e-01, 7.4118137e-02, 2.1128911e-01, 2.6312185e-02, + -2.2259822e-02, -1.5083861e+00, -2.7273307e+00, -8.5954350e-01, + -4.6734902e-01, 1.5499024e+00, 4.5016751e-01, 1.2971551e+00, + 2.9964414e-01, -1.0238653e+00, 1.0269226e+00, -1.9246057e-01}; \ No newline at end of file diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.c new file mode 100755 index 00000000000..91a5a5054b2 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.c @@ -0,0 +1,103 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include "weight.h" +#include "net.h" + +static const unsigned char *g_Input0 = 0; +int SetInputs(const void **inputs, int num) { + if (inputs == NULL) { + return RET_ERROR; + } + if (num !=1) { + return RET_ERROR; + } + g_Input0 = inputs[0]; + return RET_OK; +} +int CopyOutputsData(void **outputs, int num) { + if (outputs == NULL) { + return RET_ERROR; + } + if (num != 1) { + return RET_ERROR; + } + memcpy(outputs[0], g_Buffer+56, 40); + return RET_OK; +} + +int GetBufferSize() { + return 11360; +} +int SetBuffer( void *buffer) { + if (buffer == NULL) { + return RET_ERROR; + } + g_Buffer = buffer; + return RET_OK; +} +void FreeResource() { + g_Buffer= NULL; + g_Input0 = NULL; + void *allocated[] = { }; + for (int i = 0; i < 0; ++i) { + free(allocated[i]); + allocated[i] = NULL; + } +} +void Inference() { + const int g_thread_num = 1; + { +DoQuantizeFp32ToInt8((float *)(g_Input0), (int8_t *)(g_Buffer+0), 0.007874015718698501587, 0, 784, false); + } + { +memset((int16_t *)(g_Buffer+10928), 0, 36); +const int output_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, -9, -8, -10}; +const int output_mult[12] = {1575967322, 1893553385, 1652229292, 1640472187, 2103639838, 1107198853, 1760705451, 1082323166, 1139790835, 1956967482, 1374939831, 1914453305}; +arm_convolve_s8((int8_t *)(g_Buffer+0), 28, 28, 1, 1, g_Weight1, 12, 3, 3, 0, 0, 1, 1, g_Weight2, (int8_t *)(g_Buffer+784), output_shift, output_mult, 0, 0, 0, 127, 26, 26, (int16_t *)(g_Buffer+10928)); + } + { +arm_max_pool_s8(26, 13, 13, 0, 2, 2, 2, 2, 0, 0, -128, 127, 12, (int8_t *)(g_Buffer+784), NULL, (int8_t *)(g_Buffer+8896)); + } + { +memset((int16_t *)(g_Buffer+10928), 0, 432); +const int output_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10}; +const int output_mult[12] = {1463300439, 1589377657, 1282301242, 2029005906, 1734587761, 1880282552, 1101530126, 2125705639, 1090057139, 1527059231, 1493793984, 1935246242}; +arm_convolve_s8((int8_t *)(g_Buffer+8896), 13, 13, 12, 1, g_Weight3, 12, 3, 3, 0, 0, 1, 1, g_Weight4, (int8_t *)(g_Buffer+0), output_shift, output_mult, 0, 0, 0, 127, 11, 11, (int16_t *)(g_Buffer+10928)); + } + { +arm_max_pool_s8(11, 5, 5, 0, 2, 2, 2, 2, 0, 0, -128, 127, 12, (int8_t *)(g_Buffer+0), NULL, (int8_t *)(g_Buffer+1456)); + } + { +memcpy((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+1456), 300); + } + { +arm_fully_connected_s8((int8_t *)(g_Buffer+0), g_Weight6, 300, 20, 1, 0, -1, 2108215098, -10, 0, g_Weight7, (int8_t *)(g_Buffer+304), -128, 127, NULL); + } + { +arm_fully_connected_s8((int8_t *)(g_Buffer+304), g_Weight8, 20, 10, 1, 0, -7, 1242805537, -8, 0, g_Weight9, (int8_t *)(g_Buffer+0), -128, 127, NULL); + } + { +DoDequantizeInt8ToFp32((int8_t *)(g_Buffer+0), (float *)(g_Buffer+16), 0.5359870791435241699, 0, 10); + } + { +const SoftmaxParameter softmax_parameter = {{ "", 138, g_thread_num}, 1, {1, 10}, 10, 2}; +memset((float *)(g_Buffer+10928), 0, 4); +Softmax((float *)(g_Buffer+16), (float *)(g_Buffer+56), (float *)(g_Buffer+10928), &softmax_parameter); + } +} diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.cmake b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.cmake new file mode 100755 index 00000000000..3c6cb5d7974 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.cmake @@ -0,0 +1,25 @@ +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/) +include_directories(${OP_HEADER_PATH}/CMSIS/NN/Include) +include_directories(${OP_HEADER_PATH}/CMSIS/DSP/Include) 
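+# These CMSIS include paths are required by the generated net.c, which calls the
+# CMSIS-NN kernels arm_convolve_s8, arm_max_pool_s8 and arm_fully_connected_s8 directly.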
+include_directories(${OP_HEADER_PATH}/CMSIS/Core/Include) +set(OP_SRC + arm_convolve_s8.c.o + arm_fully_connected_s8.c.o + arm_max_pool_s8.c.o + arm_nn_mat_mult_kernel_s8_s16.c.o + arm_nn_vec_mat_mult_t_s8.c.o + arm_q7_to_q15_with_offset.c.o + exp_fp32.c.o + quant_dtype_cast_int8.c.o + softmax_fp32.c.o + weight.c.o + net.c.o + session.cc.o + tensor.cc.o + string.cc.o +) +file(GLOB NET_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/*.cc + ${CMAKE_CURRENT_SOURCE_DIR}/*.c + ) +add_library(net STATIC ${NET_SRC}) diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.h new file mode 100755 index 00000000000..5441b55e23e --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/net.h @@ -0,0 +1,50 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef __cplusplus +extern "C" { +#endif +/** + * set input tensors + * @param inputs, the input data ptr's array of the model, the tensors' count of input may be greater than one. + * @param num, the input data's number of the model. + **/ +int SetInputs(const void **inputs, int num); + +int CopyOutputsData(void **outputs, int num); + +/** + * get the memory space size of the inference. + **/ +int GetBufferSize(); +/** + * set the memory space for the inference + **/ +int SetBuffer(void *buffer); + +/** + * free the memory of packed weights, and set the membuf buffer and input address to NULL + **/ +void FreeResource(); +/** + * net inference function + **/ +void Inference(); + +#ifdef __cplusplus +} +#endif diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.cc b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.cc new file mode 100755 index 00000000000..a57d9473c98 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.cc @@ -0,0 +1,146 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "session.h" +#include "net.h" + +#include + +namespace mindspore { +namespace lite { +int LiteSession::CompileGraph(lite::Model *model) { + inputs_.resize(1); + Vector in_shape_0; + in_shape_0.resize(4); + in_shape_0[0] = 1; + in_shape_0[1] = 28; + in_shape_0[2] = 28; + in_shape_0[3] = 1; + inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeFloat32, in_shape_0); + MS_ERROR_IF_NULL(inputs_[0]); + outputs_.resize(1); + Vector out_shape_0; + out_shape_0.resize(2); + out_shape_0[0] = 1; + out_shape_0[1] = 10; + outputs_[0] = new (std::nothrow) MTensor(String("Softmax-7"), kNumberTypeFloat32, out_shape_0); + MS_ERROR_IF_NULL(outputs_[0]); + return RET_OK; +} + + +int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) { + const void *inputs_data[inputs_.size()]; + for (size_t i = 0; i < inputs_.size(); ++i) { + inputs_data[i] = inputs_[i]->MutableData(); + } + SetInputs(inputs_data, inputs_.size()); + + Inference(); + + void *outputs_data[outputs_.size()]; + for (size_t i = 0; i < outputs_.size(); ++i) { + outputs_data[i] = outputs_[i]->MutableData(); + } + CopyOutputsData(outputs_data, outputs_.size()); + + return RET_OK; +} + +LiteSession::~LiteSession() { + FreeResource(); + if (runtime_buffer_ != nullptr) { + free(runtime_buffer_); + runtime_buffer_ = nullptr; + } + for (auto &input : inputs_) { + if (input == nullptr) { + continue; + } + delete input; + input = nullptr; + } + for (auto &output : outputs_) { + if (output == nullptr) { + continue; + } + delete output; + output = nullptr; + } +} + +int LiteSession::InitRuntimeBuffer() { + int buffer_size = GetBufferSize(); + runtime_buffer_ = malloc(buffer_size); + if (runtime_buffer_ == nullptr) { + return RET_ERROR; + } + int ret = SetBuffer(runtime_buffer_); + if (ret != RET_OK) { + return RET_ERROR; + } + return RET_OK; +} + +Vector LiteSession::GetInputs() const { + Vector inputs; + for (const auto &input : inputs_) { + inputs.push_back(input); + } + return inputs; +} + +Vector LiteSession::GetOutputsByNodeName(const String &node_name) const { + Vector outputs; + return outputs; +} + +Vector LiteSession::GetOutputTensorNames() const { + Vector output_names; + for (const auto &output : outputs_) { + output_names.push_back(output->tensor_name()); + } + return output_names; +} + +mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { return nullptr; } + +} // namespace lite + +session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) { + auto *session = new (std::nothrow) lite::LiteSession(); + if (session == nullptr) { + return nullptr; + } + session->InitRuntimeBuffer(); + return session; +} + +session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, size_t size, + const lite::Context *context) { + session::LiteSession *session = CreateSession(context); + if (session == nullptr) { + return nullptr; + } + int ret = session->CompileGraph(nullptr); + if (ret != lite::RET_OK) { + return nullptr; + } + // Init(const_cast(net_buf), size); + return session; +} +} // namespace mindspore diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.h new file mode 100755 index 00000000000..e9b6f149088 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/session.h @@ -0,0 +1,71 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ +#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ + +#include "include/errorcode.h" +#include "include/lite_session.h" + +#include "tensor.h" + +namespace mindspore { +namespace lite { + +#define MS_ERROR_IF_NULL(ptr) \ + do { \ + if ((ptr) == nullptr) { \ + return mindspore::lite::RET_ERROR; \ + } \ + } while (0) + +class LiteSession : public session::LiteSession { + public: + LiteSession() = default; + + ~LiteSession() override; + + void BindThread(bool if_bind) override {} + + int CompileGraph(lite::Model *model) override; + + Vector GetInputs() const override; + + mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; } + + int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override; + + Vector GetOutputsByNodeName(const String &node_name) const override; + + Vector GetOutputTensorNames() const override; + + mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override; + + int Resize(const Vector &inputs, const Vector> &dims) override { return RET_ERROR; } + + int InitRuntimeBuffer(); + + private: + Vector inputs_; + Vector outputs_; + void *runtime_buffer_; +}; + +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.cc b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.cc new file mode 100755 index 00000000000..6daa17242ab --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.cc @@ -0,0 +1,83 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tensor.h" + +namespace mindspore { +namespace lite { +size_t DataTypeSize(const TypeId type) { + switch (type) { + case kNumberTypeFloat64: + return sizeof(double); + case kNumberTypeFloat: + case kNumberTypeFloat32: + return sizeof(float); + case kNumberTypeInt8: + return sizeof(int8_t); + case kNumberTypeUInt8: + return sizeof(uint8_t); + case kNumberTypeFloat16: + case kNumberTypeInt16: + return sizeof(int16_t); + case kNumberTypeInt32: + return sizeof(int32_t); + case kNumberTypeInt64: + return sizeof(int64_t); + case kNumberTypeUInt16: + return sizeof(uint16_t); + case kNumberTypeUInt32: + return sizeof(uint32_t); + case kNumberTypeUInt64: + return sizeof(uint64_t); + case kNumberTypeBool: + return sizeof(bool); + case kObjectTypeString: + return sizeof(char); + case kObjectTypeTensorType: + default: + return 0; + } +} + +MTensor::~MTensor() { + if (data_ != nullptr) { + free(data_); + data_ = nullptr; + } +} + +int MTensor::ElementsNum() const { + int elements = 1; + for (int i : shape_) { + elements *= i; + } + return elements; +} + +size_t MTensor::Size() const { + size_t element_size = DataTypeSize(data_type_); + return element_size * ElementsNum(); +} + +void *MTensor::MutableData() { + if (data_ == nullptr) { + data_ = malloc(this->Size()); + } + return data_; +} +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.h new file mode 100755 index 00000000000..fd0a0a07358 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/tensor.h @@ -0,0 +1,66 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
+#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
+
+#include "include/ms_tensor.h"
+
+namespace mindspore {
+namespace lite {
+struct QuantArg {
+  double scale;
+  int32_t zeroPoint;
+  float var_corr{1};
+  float mean_corr{0};
+  bool inited;
+  Vector<float> clusters{};
+  int bitNum;
+  int roundType;
+  int multiplier;
+  int dstDtype;
+};
+
+class MTensor : public mindspore::tensor::MSTensor {
+ public:
+  MTensor() = default;
+  MTensor(String name, TypeId type, Vector<int> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
+  ~MTensor() override;
+
+  TypeId data_type() const override { return data_type_; }
+  Vector<int> shape() const override { return shape_; }
+  void set_shape(const Vector<int> &shape) override { shape_ = shape; }
+  int ElementsNum() const override;
+  size_t Size() const override;
+  String tensor_name() const override { return tensor_name_; }
+  void set_tensor_name(const String name) override { tensor_name_ = name; }
+  void *MutableData() override;
+  void *data() override { return data_; }
+  void set_data(void *data) override { data_ = data; }
+
+ private:
+  String tensor_name_;
+  TypeId data_type_;
+  Vector<int> shape_;
+  void *data_ = nullptr;
+  Vector<QuantArg> quant_params_;
+};
+
+}  // namespace lite
+}  // namespace mindspore
+
+#endif  // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.c b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.c
new file mode 100755
index 00000000000..2ac043b15b4
--- /dev/null
+++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.c
@@ -0,0 +1,219 @@
+
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "weight.h" + +unsigned char * g_Buffer = 0 ; +const int8_t g_Weight1[] = { +-34, 127, 112, -4, -17, 69, -9, -127, 94, 41, -78, -56, 11, -127, -106, 93, 127, 50, 85, 80, -57, 127, 109, -127, 43, 1, -60, -6, 20, 19, -53, 52, 8, -1, -85, 127, -127, -92, 47, 34, 16, 127, 9, 117, 66, +-127, -2, 27, 68, 102, -48, 121, 125, 127, -57, -127, -113, 45, 80, 127, 74, 77, 39, 43, 127, 105, -12, 33, 101, -115, -121, -127, 127, 75, -5, 19, 57, -126, -26, -127, -75, 68, 110, 127, 80, 69, 98, -108, -119, -127, +10, -55, -127, 99, 14, -4, 119, 127, 97, -67, -111, 59, 25, 127, 114, 38, -127, -79, +}; + +const int32_t g_Weight2[] = { +-2099, -922, -314, -1140, -534, -559, -154, 509, 9591, 1598, -177, -3398, +}; + +const int8_t g_Weight3[] = { +25, 101, 86, 39, 62, 80, 76, 3, -61, 31, 46, 51, 43, 39, 50, 1, 36, 28, 60, 74, 64, 57, 27, 59, 53, 18, 18, -10, -20, 13, -34, 30, 27, 61, 4, 2, 48, 91, 39, 63, -5, 45, -6, 61, -120, +18, 40, 34, 42, 127, 43, 42, 54, 54, 88, 51, -127, 23, 74, 15, 22, 67, 48, 40, 66, 18, 73, 31, -54, -18, 68, 7, 68, 61, 31, 0, 9, 36, 19, 94, -94, 71, 27, 58, 28, 25, 42, 68, 34, -1, +42, 108, -51, 114, 52, 35, 76, 86, 10, 30, 23, 6, 78, 72, -47, 86, 51, 65, 78, 51, 35, 48, 22, 16, 16, 54, -30, 77, 2, 31, 53, 93, 50, 64, 40, 73, 24, -4, -101, -38, 66, 58, -2, 127, 62, +4, 1, 10, 42, -25, -26, 6, 37, 8, 47, 14, -26, -9, 23, 8, 24, 68, 11, 38, -4, -16, 89, 37, 39, 62, 8, 25, 24, 72, -127, 69, 0, 17, 25, 110, 77, 11, -16, 58, 69, 8, -124, -61, 119, 15, +-25, -1, -41, -23, 7, -10, -30, 17, 20, 18, -81, -31, 77, -16, -9, -4, 10, 22, -28, 43, -31, 49, -19, -1, 47, 69, 73, 59, 16, 38, 0, 72, -106, 17, 85, 42, 52, 18, 64, 7, -21, 65, -37, 8, -71, +3, 46, 14, -42, 44, 7, 6, 24, 22, 64, -12, 3, 4, 27, 32, -13, 4, -43, -23, 30, 18, 98, -16, -4, 29, 19, 8, -15, -49, -8, -7, -74, -51, -101, 127, 28, 10, -117, -46, 20, 19, -13, -36, -12, -21, +43, 37, -37, 21, -30, 6, -23, -87, -11, -35, -14, -2, 19, 73, 38, 89, -76, 34, -67, -13, -124, -118, -52, -102, -42, 9, -58, -33, -46, -123, -9, -37, -73, -99, -112, -99, -65, 83, -58, -6, -91, -75, -54, -47, -67, +-83, -89, -104, -11, -19, 33, -12, -127, -62, 10, -82, 9, 26, 66, 35, -86, -45, -35, 29, -34, 7, 39, 41, 64, 89, 96, 44, 4, -115, -36, -127, 47, 75, 75, 12, 126, 56, 34, 12, 22, -49, 64, 20, -20, 72, +4, -69, -64, 20, 10, -31, -59, -90, -81, -88, -94, 20, 95, 19, 55, 63, 26, 83, -41, -122, -80, -38, 10, 78, -1, 104, 82, 4, -45, 43, -58, -62, -26, -79, 62, 34, 65, -45, 1, 9, 43, 37, -83, -42, -50, +-92, -35, 27, 91, 98, 100, 19, 34, 11, -13, 44, -96, -29, -7, 55, 5, 91, -4, 37, 7, 75, 114, 52, -73, 20, 127, 41, 39, -5, 127, 65, -6, 47, -91, -42, 32, -27, 36, 55, -69, 12, 34, -6, -73, -43, +-64, -72, 10, 9, -64, -23, -71, -115, -48, -88, 59, -37, 66, -76, -1, -21, -49, -26, -24, 49, 103, 47, -76, 84, -90, -125, -17, -127, -11, 79, -31, 8, -2, -60, -21, -72, -27, -24, 68, -20, 9, -48, -62, -21, -117, +-31, -47, -76, -23, -19, 2, 33, -92, -23, 56, 72, 9, 46, 13, 26, -64, -72, -68, -52, 63, 65, -5, 33, 7, -67, -28, -10, 15, 0, 12, 12, 83, -29, -46, -39, -14, 12, -27, 25, -52, -30, -28, -1, -2, -60, +4, 25, 34, 45, 73, 10, 76, 25, 34, 108, 16, 45, 31, 77, -31, 6, 68, 8, 53, -80, -43, -26, 47, 32, 66, 76, 92, 72, 109, 66, 99, -119, -50, -82, 63, 127, 102, 24, 77, 44, 71, 56, 64, -24, 73, +-14, 94, 101, 59, -27, 63, 52, 80, 106, 7, -33, 48, -7, 34, 69, 77, 48, 51, 40, 68, 94, 1, -16, -22, -87, 65, 33, 30, -12, 62, 62, 89, 70, 67, 8, 49, 49, 39, 64, 72, -56, 38, 108, -9, 16, +37, -40, -46, 17, -14, 99, 53, 25, 36, 108, -15, 52, 41, 104, -127, 
11, 98, 83, -47, -5, -77, -43, 44, -7, 11, -71, -38, -127, 28, 3, 3, -9, -43, -18, -18, 21, 41, -1, -20, -18, -16, -2, -5, 55, 6, +-10, 46, 43, 99, -11, -52, -28, 40, 42, 32, -24, 19, 12, 42, 20, 81, 40, -50, -9, 37, 63, 39, -1, -8, 35, 29, -26, 81, -33, -35, -11, -10, 36, 43, 12, 19, -5, 14, 32, 80, 67, -58, -2, -32, 58, +12, -54, -8, 14, -24, -55, -84, 28, -28, 33, -62, -55, -61, -43, -25, 5, -77, -75, -64, 62, 15, 57, -60, -34, -25, -10, -53, -44, -49, -95, -39, 127, 39, 113, -114, 12, 70, -19, -24, 33, 61, 55, -9, -102, 35, +-52, -3, 20, 28, 97, 64, 34, 103, 68, 13, -32, -45, -6, 43, 108, -8, 49, 97, 78, -22, 14, -46, 34, 42, 4, 73, 61, 60, -47, -1, 78, 83, 64, 13, -123, -58, -127, -6, 19, 91, 11, 80, 109, 45, 83, +-32, -41, 52, -30, 46, 72, -40, 15, 113, 9, -95, -32, -79, 5, 113, -3, 18, -42, 81, -49, 98, 65, 94, 99, -43, -85, 6, -79, 1, 68, 50, -27, 109, 65, -44, 68, -92, -90, 83, -36, -22, 46, -87, 6, 13, +-10, -111, -53, -76, -110, 127, -16, -31, -73, 14, -6, 19, -6, 3, 5, -78, 7, -57, 20, -36, 7, -46, 3, 1, -56, -43, -48, -52, 26, -96, 19, -48, -21, -11, 58, -28, -8, -31, 16, 6, -59, -106, -97, 43, -20, +0, 27, -16, -12, 6, 28, 10, -12, -127, -48, -4, 15, -28, 69, -72, -41, 42, 35, 64, 12, -92, 17, 31, 40, 24, 11, -28, -5, -23, 24, 59, 90, -40, 56, -19, -12, -34, 14, 18, -27, 6, -15, 106, 57, -63, +74, 11, 40, -2, -49, 28, -21, -43, -37, 80, 102, -47, 127, -88, 21, 18, 43, 10, -42, -54, -31, 43, 108, -41, 99, -73, 2, 41, 44, 8, 37, 14, 44, -17, 31, 0, 60, 63, 36, 41, 73, 36, 12, 24, 47, +27, -20, -49, -41, 22, 30, 45, 12, 73, 4, -12, 7, 23, -6, 14, 14, -4, 34, 49, 54, -6, 26, 38, -3, 4, 33, -10, 69, 28, 21, 65, 85, 24, 36, 15, 58, -27, 15, -94, 11, 63, 60, -14, 76, 64, +32, -38, -1, 1, -29, -36, -82, 65, 17, 5, 16, 7, -18, 43, 14, 11, 44, 56, 63, 21, 25, 83, -13, 38, 33, 7, 29, -40, 76, -63, 77, -15, 45, 50, 127, 43, 54, 45, 23, 64, 26, -127, 23, 120, 13, +9, -104, -6, -36, -86, -60, -101, -33, -17, 18, -105, -39, -42, -74, -48, -24, 9, -44, -46, -41, -31, 11, -57, -3, -4, -56, -13, 44, 14, 45, -20, 28, 20, -8, -16, 36, -85, 24, -69, -96, -64, -88, -15, 73, -6, +27, -43, -112, -9, -21, -104, -83, -85, -83, -72, 127, -11, 63, -65, -74, -14, -74, -37, -38, -60, -79, -127, 93, 19, 95, -113, -37, 9, 20, -52, -11, 8, 42, 59, -77, -38, -1, 30, -10, -16, 48, -71, -29, -17, 38, +34, 7, -14, -58, 32, -27, -74, 45, -79, -55, -57, -25, -1, -59, -31, -108, 46, -56, 29, -40, -24, 0, 48, -8, -6, 74, 58, 121, -68, -14, -27, -30, 20, -34, -87, -96, -43, 14, 54, 18, -127, -62, -20, -72, -76, +-58, -33, -23, -122, -46, -20, -46, -88, -33, -109, 56, -15, -36, 14, -33, 41, 27, 34, 4, 33, -44, -79, -7, -69, -81, -53, -43, 26, -25, -26, 47, 51, -83, -6, 53, -48, -58, -6, 1, 29, 33, 46, -89, 24, -16, +18, 15, 14, 7, 105, 90, 58, 43, -45, 56, 113, 59, 76, 23, 62, 8, 24, 73, 37, 127, -118, 89, 81, 94, 31, 33, -29, 14, 80, 70, 73, 44, -102, 57, 127, 55, +}; + +const int32_t g_Weight4[] = { +-2310, -1291, -1028, -928, 817, -413, -884, -203, -3754, -1009, 361, -1573, +}; + +const int8_t g_Weight6[] = { +-13, -40, -42, 13, -47, -44, 2, -30, 0, -65, -34, -51, -1, -96, 26, -16, -31, -27, -2, -26, 35, -20, -48, 34, 20, -52, -14, -72, 0, 31, 30, -6, 12, -27, -14, 45, -8, -3, -16, 24, 53, 15, -25, -33, 51, +59, 39, -1, -2, 59, -3, -60, 51, 1, -27, 17, 29, 15, 16, -53, -22, -24, -6, -75, -50, -28, -4, 57, 44, -74, -37, 10, -58, -36, -18, -54, -69, -13, 59, -42, 28, -39, -13, -8, 24, 52, -57, -40, -80, 40, +31, -68, -17, 71, -44, 50, 46, -33, -5, 7, -3, -15, -53, -23, -30, -30, -11, 20, 7, 77, 4, -37, -25, -64, 
-100, 4, -4, -19, 5, 2, 23, 7, 34, -17, 30, -38, 10, 36, 19, -20, 22, -20, 57, 34, 44, +17, 77, -16, -34, -60, -38, -1, 36, 11, 65, 9, 10, -29, -56, 30, 22, -61, 7, 52, -18, 16, 57, 13, -96, -40, 5, 20, 20, 64, 44, 11, 17, 63, -15, -39, -40, -4, -15, 23, 17, 5, -1, -23, -101, -15, +-2, 37, -26, -10, -11, -12, -6, -31, -50, 1, -45, 5, -13, 2, -36, 31, -70, -16, 34, 21, 16, 12, -127, -52, 2, 11, 62, 48, -23, -5, 27, 57, 58, 32, -94, -53, 10, -2, -9, 15, 21, 9, -2, -5, -4, +20, -18, -23, 11, 37, -27, -26, -5, -4, -4, 0, -45, 12, 3, 27, -46, -20, 31, 23, 16, 2, 27, -36, 30, 2, -26, -38, 45, 11, 36, 3, 0, 34, 31, -17, 1, 31, -17, 10, 17, 36, 12, 34, -12, 11, +9, -49, 1, -14, -19, 6, 49, -7, 50, -26, 28, 13, 5, -33, 5, 32, -23, 4, 36, 98, -20, 13, -20, 44, -3, -31, 40, 85, 51, 127, -17, 4, 9, 7, 11, -27, 42, 3, 22, -6, 33, 12, -26, 20, 26, +-6, 29, -1, -13, 51, -1, -5, -28, -1, -51, 11, -3, 5, 44, 4, -24, 44, -37, 33, -65, 7, 28, 75, 7, 54, 14, -23, -35, -3, -9, 1, -30, -13, 8, 57, 27, 15, -4, -23, -14, 5, 5, 42, -6, 5, +-11, -12, 14, -12, 54, 24, -9, 16, -24, 19, 71, 32, 15, -17, 4, 30, 72, 24, -5, 21, -1, 8, 20, 71, -24, -9, 9, -20, 59, -13, -17, 28, -10, 25, -41, 24, 4, 44, 10, 41, 45, 18, 21, 2, -8, +27, -5, 16, 4, 90, -8, 36, 30, 21, -50, 0, -23, 76, 18, 8, -2, 4, -5, 18, -7, 39, -4, 78, 33, 20, -22, 7, 4, 2, -20, 5, -3, 20, 0, 101, 26, -40, -42, 29, -33, -45, 18, -29, 52, -32, +4, 14, 5, -17, 4, 30, -10, -16, -40, 25, 5, -23, -41, 43, 4, 13, 15, 17, 38, 61, -77, 71, -20, 52, 44, 41, 3, 15, -41, -1, -10, -19, 13, 33, -35, 34, -14, 103, 6, 29, -70, -47, 15, 8, 1, +68, 20, 54, -6, 99, 4, 26, -127, -37, -18, -5, -35, 54, 25, 59, 32, 103, 35, -3, -79, 22, -14, -11, -11, 30, 49, 16, 49, 29, 47, -42, 12, 11, -26, -16, 127, -6, 30, 39, 96, 28, 47, -1, 51, 50, +-20, -19, -24, 2, -16, 0, 35, 46, 38, -4, -22, -20, -20, 5, 5, 53, 68, -3, 42, 67, 1, 10, -56, -69, 39, 55, 60, 67, 80, 10, 33, -24, -3, 56, 9, -64, 81, 69, 53, 49, 16, -4, 7, -27, 17, +43, 36, 12, 51, 47, 44, -34, -7, 34, 51, -37, 39, -1, 126, 73, -47, -31, -20, -39, -12, -19, -56, -2, -41, -8, -20, -15, -33, -37, -28, -39, -22, -30, 22, -115, 55, -10, -12, 18, 29, -18, -34, -45, -11, -3, +22, -10, 62, -6, 61, 44, 76, 12, -8, -6, 17, 5, 45, -19, 41, 72, 72, 66, 51, 3, -6, -49, 33, -1, -55, -41, -5, 48, 8, 50, 32, 18, 20, 0, 44, 42, 13, 16, 3, -32, 26, 34, 16, -29, 48, +6, -23, 33, 66, -41, 17, 17, 22, -5, 49, 101, -8, 7, -52, 16, 64, -59, 53, 52, -13, 47, 27, 57, -19, 13, -43, -8, 35, 1, 57, 11, -17, -25, 51, 127, -63, 41, 12, 21, -29, 4, 4, 94, 26, -25, +-1, 33, -18, 12, -1, -25, 18, 26, 7, 10, 0, -24, 15, 2, 7, 27, -8, -16, -38, -13, -10, 38, -27, 6, -37, 51, -31, -1, -52, 2, -15, -36, -1, 37, -50, -37, -21, 18, 28, 4, -16, -26, 36, 40, 16, +-5, -127, -49, -33, 38, 4, 15, 9, -2, 23, 12, 9, 1, -52, -44, -78, -13, -26, -28, 21, -56, -22, -13, -61, -20, 0, -42, -7, -8, -4, 14, 37, -17, -31, 35, 19, -1, -72, -4, -5, -1, -29, 14, 8, 16, +-21, 26, -27, 2, -20, -51, 0, -20, -7, -38, 9, 4, -42, -3, 0, -17, 16, 28, -25, 13, 19, -32, -7, -54, -28, 22, -42, -35, -7, -22, 15, 36, 12, 80, 5, 5, -29, 12, -6, 82, -29, 14, 47, 11, 6, +31, 45, -29, 24, 49, -11, -17, -8, -6, 18, 42, -6, 20, 52, -14, -11, 5, -3, 57, -5, 35, 5, -11, 2, -4, 40, 6, -30, 13, -6, -34, -18, 30, -24, -54, -42, -15, 33, 13, -45, 34, -25, -21, -7, -61, +-58, -83, -58, -37, -23, -8, 13, -16, -13, -13, -26, 6, -17, -71, 30, 3, -31, -21, 16, -31, 1, -28, -33, 3, 29, -106, 67, -74, 1, -28, 47, 1, 53, 40, 61, 32, 15, 2, 50, 1, -66, -32, 66, -80, 80, +28, 31, 84, -7, -55, 127, -46, 51, 
-10, -4, -95, -38, -32, -7, -35, -1, 25, -14, 34, 43, 53, 31, -5, 42, 14, 2, 8, 38, 23, -22, 52, -71, 56, 12, 38, -29, -31, 32, 17, 12, 52, -15, 53, -99, -13, +50, -68, -6, 80, -8, 8, 10, 16, 93, 5, -70, -73, 26, -50, 40, 3, 118, -30, 31, 31, 68, 7, -4, -27, -34, -98, -46, -35, 98, 71, 20, 19, 2, 8, 29, -17, 24, -84, 57, 15, 89, -4, -23, 8, 39, +45, -44, -51, -28, -65, 38, 11, 79, 2, -4, 70, -25, 6, 0, -35, -9, -26, 6, 69, -29, 45, 6, 44, -59, 10, -7, 35, -29, -26, 57, -39, -30, -2, -31, -64, -46, 14, -18, -42, -53, 1, -21, -30, -43, -12, +-77, 16, -46, -5, 22, 15, -10, -40, 18, -73, -26, -44, 23, 4, 43, -3, -114, -37, -5, -30, 27, -39, -46, 35, 40, 5, -40, -12, -36, 32, -15, 29, 34, -15, -127, -15, -51, 8, -43, 17, 33, 36, -65, 42, -53, +27, -62, -54, -67, 43, 3, -38, 46, -25, -5, -35, 23, 26, 21, -15, -71, -27, -28, 34, -8, 9, 72, 39, 22, 33, -57, -8, 14, 41, 3, 51, 9, -6, 6, 8, 10, -32, -3, -14, 83, -25, 51, 33, -6, 24, +13, -26, 43, -27, -57, 53, 37, 17, 52, -20, -20, 28, -14, -25, 0, -19, -10, 32, -8, 65, 23, -24, 15, 20, 6, -52, -30, -20, 116, 118, -1, -2, 9, -11, -5, -15, 22, 3, 25, 22, 1, -22, -36, -12, 20, +-26, 8, -8, 1, 17, 1, -56, -25, 25, -71, -54, -29, 4, -31, -52, -39, 3, -11, 12, -97, -46, -32, -16, 52, -3, -19, -39, 23, -17, 5, -26, -17, 3, -27, -27, 127, -49, -32, -77, 41, -63, 1, -74, -22, -59, +-44, -5, -42, 33, -39, -40, -35, 1, -43, 23, -41, -15, -25, 43, -26, 68, 25, 24, 4, 27, -31, 86, -65, -32, -11, -60, -15, -16, 68, -46, -71, 2, -17, -12, 0, -34, -7, -35, 32, -41, -33, -45, -16, -86, -41, +-9, -23, -25, -5, -14, 10, -83, 12, -87, -7, -63, -14, -5, 31, 77, 5, 92, 10, -23, -8, 29, 29, -78, 42, 22, -48, 52, 64, 32, 44, 20, 30, 8, 17, -52, 6, 33, -15, -33, 36, 20, -18, -27, 83, -23, +-55, -9, -9, 0, -1, 94, 21, -56, -68, -39, -1, -29, 20, 34, -33, 15, 55, 60, 34, -25, -10, -26, 16, -49, -52, -8, 15, 1, -65, -13, -52, -14, -12, -43, -106, 9, -10, -1, -2, -63, -82, -37, 33, -61, -34, +5, -6, -17, -14, 45, 36, -7, -127, -17, 44, -38, 48, 13, 1, 36, -12, 2, -14, -23, -117, -1, -47, 11, 30, 9, 22, 62, 3, -22, 4, -1, -93, -104, 15, 21, 106, -60, -27, 13, 53, -65, -10, -37, 51, 72, +-51, -58, 2, -4, -73, 10, 17, 42, -60, -4, 4, -44, 1, 23, 4, 19, 13, 18, -18, 2, -52, -51, -66, -49, -18, 14, 2, 32, -20, 0, 22, -74, 9, -25, -61, -30, -18, 41, 20, 12, -29, 0, -16, -25, -1, +19, -35, -7, 28, 48, -22, 3, -42, 45, 40, -36, -18, 13, 87, 86, -69, 18, 22, -12, 11, -23, -23, 1, 3, -61, -55, 12, 56, 11, 60, 36, -77, 14, 50, -103, 87, 38, 16, 11, 60, -35, 80, -45, -23, 11, +76, -104, 81, -15, 85, 59, 8, -19, 46, -4, -33, 5, 77, -127, 74, 33, 95, 70, 10, -63, -16, -58, -20, -13, -58, -92, -34, 32, 39, 2, 30, -54, 7, -59, -18, -43, 41, -11, 59, -50, -23, -13, -17, -72, 81, +8, -115, 35, 67, -37, 38, -71, 72, 9, -7, 4, 84, -65, -99, -8, 47, -48, 4, -60, 36, -6, 71, 19, 126, -15, -5, -6, 48, -35, 47, 1, 79, 14, 51, 64, 6, -11, 12, -9, -50, -55, 17, 45, 57, -20, +-17, -13, -14, 14, 48, -1, 0, -11, 19, -13, 60, -32, -41, -12, 13, -31, -28, -47, -52, -46, -6, -39, 127, 4, -89, -48, -20, -54, -61, -70, 7, -59, 12, -85, 41, -31, -36, -13, 4, 22, -23, -31, -4, -42, -43, +-25, 37, -32, -42, 26, -58, 50, -40, 35, -81, 5, -33, 39, 43, -66, 10, -26, -23, -31, 53, -34, -69, -4, -25, -5, -74, -42, 13, 10, 20, -61, -2, 8, 19, -99, 3, -27, 78, -33, -12, -91, -21, -76, -4, 10, +-32, -73, -18, -36, 56, 9, -85, -19, -48, -19, -29, -27, -44, 8, -84, -6, -22, 10, -37, -61, -45, 39, 3, 36, -29, 31, 11, -37, -19, -73, 5, 13, 38, -50, -51, -17, 26, 68, -25, -69, 3, 15, -4, 10, 46, +-33, -51, 21, -38, -20, -35, 
-39, 55, 24, -37, 1, 21, -35, -66, -22, 48, -8, -3, -74, 50, -13, -60, -39, 2, 3, -38, 15, -52, -9, -46, -11, 93, 53, -39, 30, 14, -27, -10, 58, 1, -2, -26, 16, 15, 48, +16, 1, 14, -15, -5, -40, -30, 44, 15, -50, -53, -25, 21, -73, 94, 37, -81, 24, -18, -95, -26, 24, 42, 6, 56, -18, 104, -1, 28, -26, 85, -45, 77, 5, 1, -7, 2, 19, 5, 39, -5, -1, 62, -67, 33, +-3, -23, -29, 25, -38, 7, -56, -10, -38, -9, -60, -75, -34, 5, -21, 23, -83, 35, -74, -11, -66, 6, -33, 42, -104, 35, 18, -84, -82, 61, -56, -99, -25, 78, -2, 15, -58, 69, -31, -4, -20, 41, -36, -86, -60, +23, -64, 18, -2, 36, 14, 21, -4, 32, -55, -50, -54, 44, -4, -4, 14, 102, 1, -49, -60, -50, -20, 17, -26, -101, 0, -56, -28, 15, -11, -2, -50, 30, -90, 25, -34, -28, -14, -42, -52, 127, -14, -84, -80, -44, +-104, -85, -79, 3, -36, -67, -73, 103, -16, -10, -83, -11, -23, -91, 29, -36, -44, 15, -39, -75, 44, 24, 1, 29, -86, -27, -12, 47, -2, 28, -27, -1, 34, -11, -43, 28, -84, 7, -41, -15, 19, -28, -27, 41, 87, +12, -37, -53, -27, 40, -4, -26, -11, -48, 5, -40, 10, -25, -21, -76, -27, -57, -7, -34, -72, -28, -47, 43, 1, 7, 25, -32, 2, -66, 51, -2, -1, 32, 6, -20, -34, 20, 31, -8, -23, -14, 24, -29, -59, 10, +15, 3, -15, -17, -38, -24, -30, -37, 14, -32, -27, -4, -40, -57, -26, 6, 3, -37, -35, -21, 25, 12, -31, -23, -52, -8, 47, 43, -27, -27, 40, -53, 61, -27, -107, -34, 27, 38, -23, -6, -35, -39, -39, -127, 7, +-18, -71, -6, -50, 6, -20, -20, -24, -31, -38, -92, 64, -18, -20, 37, -21, -13, 10, 34, -3, 27, 23, 14, -4, 34, 27, 33, 38, 34, 47, -13, -21, 27, 14, -27, 34, -16, 28, 52, -46, -61, -4, -14, -98, -29, +-48, -6, -16, -7, 18, 40, -83, -34, 21, 13, -51, -26, -24, -7, -10, 72, -55, 66, -5, -22, 9, 19, -45, -31, -24, -35, 61, 69, -26, 18, 42, -10, 45, 13, -20, 71, -16, 89, 17, 16, -7, 0, -3, 55, 25, +-36, -9, 2, 38, -33, 21, 28, 102, 53, -4, -66, 25, -31, 21, -7, -35, 2, 0, 17, 54, -17, 1, -36, -11, 71, 4, -48, -12, -85, -29, -37, 7, 32, -9, -30, -10, -4, -16, 16, -68, -65, -27, 29, -38, 44, +-20, 24, 48, -12, 70, 65, -38, -12, -14, -4, -22, 48, 68, 44, 34, -41, 28, -16, 77, 32, 32, 18, 51, -35, 72, -23, -56, 32, 60, -70, 69, 83, 47, -7, -12, -14, 89, -85, -32, 41, 54, -55, 45, -5, 52, +25, -1, 29, 21, -72, -72, 57, -14, 13, -66, 17, 27, 48, -29, 86, 15, 53, 22, 24, 10, 46, -40, -44, 10, 30, 17, 51, 30, -29, 48, -23, -3, -8, 6, -3, 13, -49, -29, -45, 56, 16, -20, -11, 127, -31, +43, -41, 10, 34, 44, 17, 8, -34, -48, 50, -16, 24, 42, -35, -28, 7, 17, 17, 23, -72, -76, 5, 47, -3, -64, -40, 30, 5, -52, -9, 51, 32, 23, -20, 95, -40, 18, -20, -14, -31, 46, -56, 18, -90, -100, +73, 61, -44, 91, 39, 21, -15, -127, -11, 70, -10, -65, 33, 56, 24, 44, -20, 15, 53, -104, 13, 79, 49, -53, 94, 21, 18, 16, -32, -1, 25, -40, 26, 24, -45, 42, -43, -106, -9, 2, -16, 4, 47, 83, 47, +-5, -53, 18, -54, -83, -5, 24, 15, 11, -56, 16, -45, -44, -61, -120, 26, 28, 20, 38, 17, 24, -3, 48, 14, 45, -22, 42, 26, -5, -26, -21, -6, 1, 28, 26, 20, -19, -12, 8, -29, -19, -17, -45, -3, 5, +39, -17, -31, 0, 1, -1, -34, -69, -64, -22, -58, 30, 20, -7, -3, -5, 41, 72, 11, -98, -5, 15, 2, 26, 14, 4, 9, 37, 54, -19, 50, 24, 6, -1, 35, -7, -35, 73, 9, 23, 15, -31, 45, -48, -2, +26, 49, -4, -9, 68, 2, 33, -32, 55, 5, -44, 46, -10, -23, 34, 34, 48, 52, -64, -54, 71, -11, 24, 52, 44, 1, 35, 53, 4, 57, 1, 72, 19, 3, 35, 26, -20, 20, 41, 46, -37, 44, 57, 126, 60, +-36, 18, -10, 93, -28, 27, 10, 91, 8, 38, -54, -61, -48, -6, 2, 86, 95, 55, -29, 80, -32, 48, -42, -26, -50, -29, 39, 39, -45, -18, 54, 21, 5, -17, -17, -30, 53, 8, -1, 57, 36, 70, 35, 2, 35, +9, 71, -15, 54, 108, 
-62, 25, -22, 74, 5, 7, 15, 4, 31, 32, -3, 45, -29, 2, 11, 27, -9, 54, 3, 13, 34, -24, 66, 9, 47, 38, 127, 21, 12, -38, -23, 80, 64, 14, 6, 13, 1, -25, 40, -36, +2, -38, -27, -34, 62, 71, -25, -26, -45, 18, -20, 9, 15, 62, 36, -3, -80, -11, 18, 19, -9, 26, 10, -11, -28, 90, -12, -29, -64, -127, -5, -2, -29, -94, 41, -10, -92, -57, -73, -61, 59, 20, -37, -4, 19, +-41, -15, -14, -40, 15, 17, -12, 79, 56, -80, 23, -22, -37, 51, 5, -19, 59, -50, 16, 7, -7, -39, 18, 44, 60, 31, -17, 19, 38, -46, -31, 43, -30, -40, -62, -4, -9, 6, -7, -33, -18, -59, -56, -20, -87, +65, 64, 49, -47, 33, 65, 27, -90, 18, -34, -42, -25, 26, -67, -53, -46, -6, 42, 41, -1, 48, -43, -76, 26, -21, -41, -32, 28, 8, -3, 9, 10, 24, 24, 8, 67, 29, -56, 76, 58, 32, 45, 27, -16, 34, +36, 55, 25, -6, 33, 5, 4, 82, 52, 22, 34, 60, 62, -2, 0, -13, 39, -38, 57, 4, -44, -9, 98, 62, -3, -113, -10, -15, -14, -35, 61, 64, 37, 28, 27, 31, 47, -127, 39, 53, 18, -90, 80, -27, -7, +19, -6, -25, 88, -86, 26, 67, 34, -48, -4, 11, -4, -13, -4, 41, 66, -60, -13, 16, 51, 45, 54, 61, -17, -18, 17, 14, 37, -16, 7, -35, 53, -18, 62, 8, 47, 19, 54, 30, 72, -122, -31, 123, 91, 32, +72, 98, 94, 36, 1, 54, 117, -78, -95, 127, 46, 74, 100, 117, -20, 76, 36, 18, 9, 56, -30, -17, 2, -96, -36, 45, 69, 27, 1, 16, 18, 56, -23, -16, 58, -22, 54, 34, 28, -21, 64, 6, 22, -87, -70, +39, 52, 43, 96, -46, 98, 35, 27, -9, 98, 20, -26, 78, 21, 1, 118, 34, 35, 6, -48, 37, 41, 5, -6, 33, 8, 104, -30, 19, -54, 16, 8, 33, -4, 38, -4, -36, -12, 45, 14, -24, 42, -2, 31, 43, +-16, 23, 7, -5, 24, 6, 50, 15, -36, 20, 29, 23, 82, -18, -80, 36, 70, 40, 6, 51, 19, 74, 14, -11, 74, 1, -55, 22, 99, 38, 5, -44, -8, 11, 0, 51, 24, 86, 46, 65, -35, 96, -21, 3, -15, +80, 93, 47, -8, 60, 97, -6, -18, 48, 51, 72, 14, -9, 30, -4, 16, 72, 60, 0, 99, -26, 6, -15, 36, 34, -24, 77, 71, 31, 16, 39, -10, 43, 20, 28, -28, -15, 6, 49, 32, 26, 39, 3, -15, 84, +-23, -25, 10, 41, -50, 8, 58, 71, 34, 78, 80, 58, 34, 43, 45, 26, -7, -3, 81, 78, 30, 1, 40, -44, 23, 82, 29, 13, 3, 23, 27, 68, 10, 35, 75, -125, -41, -8, -2, -38, 45, -43, 50, -13, -5, +75, -6, 24, -51, -26, 15, 85, -11, 41, -65, 17, -17, 29, -2, 47, -40, -54, 35, 49, -38, 56, -14, 43, 11, -27, -57, 29, -35, -33, 28, 51, -22, 7, 11, 1, 4, -16, 83, -20, 41, 52, 23, 3, 26, -10, +45, -58, -16, 51, 37, -37, 20, 33, -21, -46, 80, -9, 15, -37, -27, -5, -24, 29, -74, 13, 1, -24, 25, -20, -67, 74, -3, -22, -62, 37, -16, -31, 45, 60, 25, 34, -24, 69, 16, 9, -53, 9, -5, -36, 36, +76, -57, 16, -60, -3, 28, 22, 18, 28, 28, 12, 26, 66, 40, -15, 23, -39, 10, 57, 42, 49, 54, 72, 24, -26, 84, -22, 29, -4, 21, 44, -3, 1, 1, 81, -17, 24, -10, -2, 33, 7, 20, 69, -17, 40, +-21, 20, -13, 9, -127, 11, 43, 127, 46, 40, 8, 67, -43, 7, -33, 37, -40, 9, -38, 111, 46, -48, 14, 19, 39, -49, 60, -10, 0, -2, 11, 10, 45, 9, 6, 12, 78, 23, 46, 5, 27, 58, 30, 13, -11, +29, 0, 7, -14, 16, 8, 9, 48, -31, -5, 65, 36, 13, -52, 44, -16, -35, -41, -39, 42, 23, -25, 51, 69, -45, 20, 24, -28, -17, 25, 7, 40, -12, -17, 7, 19, -12, 65, 27, 37, 41, 31, -17, 37, 2, +64, -12, 35, -20, 10, 3, 12, -23, 17, -10, -5, -6, 47, 36, -14, 17, 15, -7, 66, 0, 38, 29, 18, -36, 22, 5, 8, 30, 0, 30, 19, 42, -14, 14, 73, -1, 35, 80, 58, -16, 48, -18, -11, 6, -22, +46, 27, 4, -67, 35, -33, -9, -19, -6, -25, -39, -14, -18, 29, -3, -11, -35, -51, -21, 58, 32, 33, 34, 69, 23, 76, -12, 8, 18, 34, 28, 63, 38, 37, -30, 39, 67, -32, -4, -5, 77, -31, 68, 31, 9, +-18, -39, -3, -11, -77, -25, -29, 61, 3, -19, -3, -10, -22, 69, 10, -13, -20, 19, 30, 61, -5, -1, 5, -39, -37, -41, 19, -12, 19, 30, -64, -19, 
53, -16, -51, 8, 40, 2, 5, 78, 116, 67, -64, -94, -15, +14, -27, 46, -59, 78, -17, -20, -95, 18, -6, 116, 23, -44, 13, 32, 9, -51, -45, -33, -41, -15, 15, 88, 2, -2, 99, -21, 8, -33, 64, -5, -24, -46, 88, 72, 17, 48, -36, 59, -28, 19, -30, 63, 21, 80, +14, -83, 6, -18, -35, 127, -44, -8, -60, 83, 0, 7, -16, -24, 65, 17, -60, -15, -64, 76, -10, 13, -45, -27, -65, 44, 95, -59, -57, 25, -72, -69, 10, 20, 32, -22, -35, 87, -3, 31, 45, -24, 22, -14, 56, +63, -17, -12, 55, -37, -52, 79, 29, 37, -55, 29, -34, 15, -56, 55, 22, 78, 14, 28, 47, 11, -55, 12, -52, 0, -52, -46, 39, 68, 61, 23, 26, 13, 72, 5, 68, 20, -40, -13, 16, -15, -1, 64, -15, 31, +57, 9, -10, -38, -12, -13, -40, 36, -30, -24, 15, -64, 22, -22, -15, 5, -66, 34, -4, -44, -14, 9, 10, -70, -6, -7, -13, -15, 60, -6, 38, -113, -43, 9, -31, -73, 6, 55, -14, -5, 54, 0, 12, -127, -38, +84, -3, 47, 109, 24, 44, 60, 104, -11, 32, 4, 36, -3, 39, 121, 27, 50, 34, -46, 37, -6, -19, 24, -62, -15, 41, 2, 23, 23, -3, 8, 20, -21, -27, -28, -91, -26, 122, -47, 4, 43, -22, -19, 19, -53, +-12, -9, -14, 30, 97, -24, 10, -8, -20, -21, -4, -2, 41, -51, -61, 5, -29, 67, -53, 58, -42, 13, 9, -32, -56, 56, 3, -23, -94, 56, -75, -48, -60, 3, -19, 45, -56, 127, 53, -69, -24, -28, -40, -30, -24, +-28, 32, 28, -57, 2, 74, -49, -14, -71, 50, 5, -12, -10, 43, -63, 3, -17, 84, -38, 3, -58, 38, 39, -15, -84, 49, -36, 13, -9, -14, -44, -76, -17, -35, 37, -15, -36, 95, -74, -86, -9, 30, -121, -85, -17, +-55, 5, -61, 10, -33, -28, -127, -15, 58, -40, -23, -72, 0, -26, 11, -1, 5, -24, -7, -58, 13, 21, -77, -20, -75, 14, -24, -57, -10, 21, -2, -8, 36, -39, -45, 3, -29, -34, -5, -11, -43, 47, -34, 13, 22, +12, 6, -54, -23, 58, -24, -11, 71, 24, -1, -83, 8, -38, -32, -64, 2, -21, 69, -5, 69, -59, -34, -34, 17, 8, 64, -54, 41, -41, 24, -22, 27, 0, 21, -33, -76, 97, -32, 0, -23, -25, -11, 8, -3, 47, +41, 18, -8, -21, -39, 13, -32, -39, 38, 16, 42, 55, -16, -25, 109, 29, 2, -15, 57, 23, 15, 31, 15, 39, 60, -54, 40, -18, 52, 7, 111, -9, 56, 13, -16, 16, 88, -20, -88, 19, 8, 47, 37, -44, -6, +44, -81, 25, 25, 48, 3, 20, -35, 53, -11, -34, 60, 20, -17, 84, 11, 2, -20, 38, -47, 32, -61, -21, -20, 29, -24, 3, -61, 87, 25, 103, 27, 18, 26, 35, 38, 54, -12, 81, -24, 43, 18, 29, 44, 81, +51, -42, 85, 11, 6, 8, 58, 79, 37, 63, 64, 39, 56, 31, 24, 61, 34, -26, 43, 51, -31, 49, 121, 59, 65, 70, 52, -31, 50, 27, 52, 40, -25, 31, 88, -51, 52, 66, -12, -54, 36, 42, 54, -13, 48, +-19, 6, 43, 9, -21, 23, 7, 1, 33, -78, 93, 47, -3, -12, 14, 32, -67, 44, 85, 7, 64, -72, 67, 51, 51, 67, -32, -9, -52, 42, 75, 59, 22, 70, 47, -8, 45, 127, 7, 23, 42, 76, -10, -12, -36, +34, -113, -51, 1, 50, -100, 34, 49, 37, 4, 46, 11, 33, -127, -43, -33, -65, 14, -36, -9, 20, 41, 30, 9, -4, 66, 96, 30, 31, -6, -7, -24, -12, 17, -29, 47, -40, 48, 44, -40, 35, -52, 26, 6, -16, +26, 31, 29, 31, 72, -81, -30, 2, 43, 18, -90, -7, -46, -10, -23, 6, -30, 17, 35, 18, 94, 7, 23, -24, 55, 0, 48, 51, -40, -2, 39, -3, 69, 34, 55, 59, 62, 49, 43, 33, -28, 99, 49, 17, -50, +22, -24, 34, 0, -3, -19, -4, 31, -11, 17, 40, -24, -16, 15, -53, -56, -10, -15, 20, 82, 36, 59, 41, -32, -9, -38, 0, 14, 36, 8, 72, 49, 106, 44, 55, 44, -15, -19, 59, 55, 62, -9, 35, 38, 79, +54, -6, 75, 14, 11, 72, 46, 53, 81, 53, -8, -1, 8, 17, -12, 72, 59, 55, 35, -14, 63, 47, 22, 20, 74, -1, 0, 0, 20, -37, 0, 10, 35, 39, 54, 88, 44, -53, 11, -39, -67, 55, 11, 1, 12, +-4, -32, 69, 73, -55, 102, 39, -19, 63, 33, 80, 22, -22, 52, -6, 26, -50, 32, 37, -22, -5, 53, -59, 48, -16, 55, 35, -24, -103, 60, -55, 8, -59, 63, -37, 73, -41, -63, -7, 45, 53, -58, 
52, -20, -11, +-38, 38, -7, 21, 1, 39, -13, 82, -85, -43, 62, 1, -30, 88, 2, 63, 11, -17, 30, 80, -78, 59, 55, 8, 10, -2, 8, 26, 7, -9, -40, -47, 12, 17, -12, 18, -6, 96, 13, -18, -33, 75, -31, -40, -23, +-21, 5, -62, -22, 41, 20, 25, -72, 82, -50, 56, -46, 53, -40, -54, -43, 49, -54, -4, -124, 47, -65, 38, -70, 81, 20, 45, -85, 79, 19, -8, -85, 7, -3, 48, -91, 79, 77, -30, -23, 123, -1, 69, -115, -27, +34, 36, -18, 70, 44, 8, -7, 46, 31, 55, -90, -10, 34, 70, 19, 49, 120, 84, -2, 88, 46, 16, 26, -40, 52, 48, 3, 15, -23, 73, 87, -36, 27, 68, 13, 30, 22, 39, -127, 25, 70, 46, 82, 101, 68, +-35, -95, 2, 37, -31, -22, 74, 99, 63, 95, 62, 68, 28, -46, -67, -5, 49, 44, 72, 34, 25, 5, 24, 59, -1, 15, 13, 127, -2, 6, 69, 102, -7, 26, -20, 11, 81, 8, -27, 30, 4, 112, 25, -46, 1, +29, -12, 81, -49, 57, -3, -37, 3, 63, 1, 94, 33, 5, 53, 14, 18, -28, -20, 19, -17, -1, -19, 50, -25, -10, 19, 13, -13, 18, -28, -16, -30, 16, 2, 95, -70, -68, -74, -5, -42, 1, 64, -26, 28, 16, +-14, -18, -66, -11, 24, 31, -59, 15, 93, -39, -19, -21, 10, 1, -53, -43, 26, -42, 8, -35, -64, -7, 80, -16, 50, 59, 27, 45, -29, -10, 30, -5, -21, 26, 96, 11, 28, 11, -16, -29, -59, -17, 69, 14, 43, +6, 2, -50, 12, 17, 3, -9, 10, -31, 13, -58, 1, 27, -15, -75, 52, 23, -48, -26, 11, 36, -4, -24, -8, -25, -48, -50, 35, 8, 49, 9, 40, 24, -2, -27, 19, 37, -63, 58, 14, 17, -11, 23, 20, 16, +-41, 18, 13, 20, -119, 76, 26, -22, 29, 64, 4, 6, -35, 9, -14, -43, 13, -68, 32, -31, -23, -62, 53, -2, 15, -39, -21, -31, 16, -20, 18, 38, 7, 5, -17, -20, 26, -27, -22, -32, 38, 37, 9, -12, 65, +28, 20, -10, 7, 0, 40, 27, -59, 21, 4, -14, 6, 45, 26, 26, -13, 127, 25, 8, -47, 99, -36, -14, 13, 44, -46, 6, -30, 75, 39, -6, 34, -26, 28, 55, 20, -27, -43, -40, 55, 20, -22, -13, -8, 17, +4, 19, -11, -26, -11, -2, 16, -12, 10, -23, 36, 2, 51, 0, 40, 52, 20, 38, 31, -36, -33, 6, 10, 11, 1, 67, -1, 11, 41, -19, 13, 54, -95, -12, -7, 13, -40, -51, -15, -44, -62, -27, 17, 10, -69, +-13, -11, 39, -17, 20, 7, -21, -57, -55, 14, 63, 38, 31, -1, 34, -5, 23, -17, -36, -66, 15, 5, 100, 69, -31, 6, 5, 37, -29, 0, 17, 0, -39, 13, 98, -10, -23, 6, 57, 66, 19, -23, -24, 4, -40, +26, 43, 1, 9, 24, -127, 15, -29, -54, -72, -2, -46, 43, 21, -12, 12, 6, 20, -37, -10, 6, 8, 10, -16, -43, 9, 65, 26, -24, 5, -18, -3, -29, -9, 32, 16, 24, 52, 61, 7, -21, 9, 21, 9, 41, +47, 16, -19, 26, 73, 57, 47, 1, -26, -4, 1, 27, -39, -12, 26, -15, 7, -8, 32, 14, -24, -5, 28, 13, 18, 45, 11, 17, -79, 18, 49, -15, -16, -14, -30, -22, 19, 39, 21, 33, -67, -45, 16, 11, -6, +50, -26, 12, -6, 33, -49, -38, -83, -15, -5, 20, -13, 39, -25, 25, -24, -14, -43, 4, -30, -64, -5, 32, -5, 50, -35, 37, 27, 22, -43, -44, -61, -32, -7, -33, -36, 2, 35, 4, 0, 67, 13, -27, -83, -59, +-58, -35, -7, -88, -104, -61, 22, 83, -1, -93, -14, -51, -101, -88, -4, -115, -90, -109, 12, -50, -20, -87, -61, -76, -73, -44, -80, -55, -71, -23, -50, -41, 6, -10, -25, -9, -37, -51, -31, -43, 13, -43, -44, -5, -15, +-13, 46, 33, -7, -57, 89, 4, -43, -25, 5, -50, -52, -71, -4, 11, -43, -48, 7, -8, 38, -93, 1, 22, -29, -23, 6, 23, -62, -35, -42, -42, 18, 21, -37, -94, -75, -41, -18, -29, 12, -35, -49, 1, -17, 30, +56, -24, 39, -27, -60, -13, -29, -12, 3, -68, 29, 7, -51, -36, 22, -34, 124, 59, 19, -42, -17, -100, -66, -53, -50, -34, -70, -28, 15, 26, 107, 35, -21, 29, 45, -35, -18, -11, 3, 63, 11, 22, -51, 8, -67, +33, -127, 55, -43, -83, -25, 38, 12, -13, 58, 33, 24, 86, -77, -65, 7, -44, -35, -2, 100, -65, 23, 45, -16, -53, 20, -9, -80, -20, -13, -12, 41, 8, -4, 22, -105, -23, -69, -36, -61, -4, 9, 10, -109, 22, +93, 57, 36, 
-23, 22, 41, 22, -33, 60, 4, 38, 22, 25, 88, 6, 40, -37, 64, 33, -94, 38, 91, 8, 39, 28, -4, 50, -69, -74, 31, 26, -26, 53, 11, 127, 59, 53, -68, 60, -24, -57, 44, 68, 18, 59, +23, -9, 94, 90, 18, 71, -12, -21, 31, 27, -43, 22, 36, 56, 65, -12, -44, -16, -1, 30, -31, -22, -2, 19, -19, 32, -11, -3, -15, 57, -26, -42, -33, 57, -65, 27, 12, 73, -2, -8, 36, 0, -25, -7, 0, +-24, 1, 45, 46, 47, 32, 52, 23, -47, -14, 8, -21, -5, -3, -34, 6, -1, 21, 4, 76, -90, -39, 30, 2, -65, -14, -44, 39, -24, 29, 47, -10, 49, -19, 9, 18, 21, 4, 24, -28, 63, 32, 5, -17, 59, +-30, -84, -6, 26, -51, 26, -31, 127, 15, -22, 42, -6, -22, -66, -19, 16, -12, -15, 24, 27, 7, 28, 31, -71, 25, 16, 35, -20, 63, -11, -25, -40, -64, -21, 20, -120, 37, 44, -29, -48, 71, -4, 32, -105, -95, +25, -1, 50, -11, 36, 3, -33, -1, 2, -25, 48, -29, -59, 11, 10, -73, -38, -67, -21, 1, -3, -69, 113, 23, -28, -17, -9, -41, -70, 22, 17, -5, -6, 8, -11, 13, -25, -33, 31, 20, 35, -2, 5, 60, 8, +-33, -127, -84, -51, -55, 20, -9, 49, 21, 27, 52, 30, -55, -31, -19, -43, -26, -22, 6, 10, 6, -40, -30, -26, -40, 12, 7, -3, -44, 60, -28, 0, -11, 26, -54, 8, -55, 65, 30, -65, 11, 22, -64, -16, 11, +17, -4, -17, -31, 10, 2, -43, -26, 4, 41, 22, -49, 5, -2, -19, 21, 37, -21, -41, -36, 30, 3, 26, -24, -37, -44, -1, -17, 33, 26, 20, 9, -7, -38, 55, -17, -53, 31, 18, -18, -27, 55, 2, -41, -37, +-18, 25, -39, -23, 42, -33, -19, 12, 30, -30, -39, -56, 31, -27, -1, 5, 17, 2, 14, -27, -7, 3, 69, -41, -17, 28, -40, -14, -36, -15, 50, -13, -12, -33, 38, -7, 27, -38, 27, 16, 25, -14, -20, -31, -3, +18, 7, -30, 87, 16, 49, 44, 52, -47, 22, 15, 25, 47, 49, -15, 32, 31, -42, 45, 127, -57, 47, 61, 13, 12, 42, -31, -16, -28, -9, -58, 49, -20, -30, -5, -12, -12, -10, -5, -46, -63, -55, 1, 50, -64, +-29, -29, -66, -39, -110, 32, 32, -4, -7, -17, 78, -28, -5, -27, 21, 9, -5, -6, 54, 4, -44, 5, 6, -72, 7, -29, -77, 33, -31, 10, 27, 60, 2, -21, -14, 5, 5, -38, -28, -60, -90, 7, -42, 81, 25, +-73, 38, -14, -52, -31, 33, -7, -66, -40, -18, 24, -41, -8, 27, -23, -107, -2, 26, -61, -103, 57, -54, -55, 48, 85, 61, 6, -76, -21, 49, -27, -34, -37, 10, 19, 25, 16, -59, -17, -12, -6, -32, -61, -64, 57, +-5, -35, 30, -2, -20, -32, -61, 40, -42, -41, 13, 0, -10, 9, -10, 24, -18, 5, -25, -13, -14, 0, -20, 86, 35, 29, 31, 39, -46, 42, 36, -14, 44, 16, -27, 39, 6, 23, 1, -18, -28, 13, -5, 97, 33, +17, 6, 38, -47, -13, -11, -34, -11, -9, 23, 101, -21, -46, 11, -6, -81, 31, -46, -40, 4, 11, -31, 112, -12, -65, 25, -16, 12, 30, -31, 30, -89, -50, 30, 83, 27, 12, 10, 71, -4, -36, 9, 27, -40, 64, +-48, -15, -29, 2, 12, -70, 20, -25, 3, -44, 9, 20, -19, -42, -55, -10, -44, -23, -31, -27, -1, -37, -85, -41, -81, 84, 99, -11, -94, -8, -75, -66, -59, -29, 23, -27, -41, 62, 54, -28, 18, -54, 4, 18, 35, +-47, 31, -56, -31, 21, -12, -21, -28, -14, 62, 59, -9, 3, 46, -57, -51, -115, -73, -30, -50, -26, 28, 26, -92, -18, 7, -42, -75, -103, -127, -44, -6, 21, 7, 0, -5, -7, 41, 17, -1, 23, 22, 39, 37, 53, +3, 34, 65, -15, 25, -79, 33, 7, 17, -8, -3, -12, -7, 54, -22, -29, 1, -42, -29, -13, 5, -26, 45, 2, 7, 4, -3, -2, -35, -10, 38, -36, -56, 4, 104, -84, 53, -43, -33, -31, -47, -19, 72, 5, -51, +82, 2, -5, 9, 77, 36, 14, -89, -9, 11, 49, 50, 40, 4, 37, 14, 76, 17, -12, 0, 7, -19, 106, 3, -8, 28, 68, -46, 85, 59, 32, 53, -17, 29, 33, -1, 63, 21, 41, -8, 16, -27, 6, 22, 3, +48, 43, 31, 7, -2, -64, 22, 0, 61, 47, 23, 15, -11, 50, 30, -26, -28, 27, -59, 15, 9, 2, -28, 18, -37, 37, 15, -23, -89, -54, -30, -54, -57, -21, 59, 25, -27, -32, 72, -127, -54, -30, -23, 17, -23, +-63, 72, -82, -15, 12, 17, -5, 
1, 71, 71, -23, -81, -85, -8, -61, 8, -13, 2, -4, 85, -3, 22, -48, -12, 18, 7, -13, 44, 18, 17, -63, -74, 16, -1, -20, -53, 16, 123, -33, -19, -38, 50, -25, -25, -6, +-22, 40, -40, -43, 127, -46, -10, -32, -14, -76, -6, -98, 46, 67, 51, -33, 82, -50, 4, -81, 16, -19, -25, 4, 53, -22, -29, -17, -16, 43, -19, 40, 9, 17, -41, 78, -22, -11, 7, 71, -3, 76, -22, 64, 65, +89, -15, -32, 33, -38, -7, -12, 46, 7, -19, -61, -27, -48, -29, -50, 19, 68, -3, -7, 53, 29, 35, -38, -42, 6, 24, -8, 102, 80, 1, 52, -8, 17, 73, 3, -35, 59, 5, 4, 24, 32, 20, 28, -15, -2, +9, 56, 13, 0, -86, 119, -4, 2, -44, 16, -23, 9, -55, -10, 15, +}; + +const int32_t g_Weight7[] = { +46, 281, 76, -80, 102, -308, -329, -58, -155, 111, -373, -46, 123, -6, 19, 130, -92, -89, -143, 231, +}; + +const int8_t g_Weight8[] = { +-32, 113, 23, -12, -35, 32, -93, 60, 40, 90, -43, -127, 65, 114, -38, -58, 41, -39, -95, 127, 11, 69, -25, -68, -37, -68, -33, -127, -25, -46, -16, -12, 95, -11, 30, 127, 31, 93, 52, 118, 99, 119, -40, 45, 127, +42, 119, -37, -67, -36, 33, -43, 105, 50, -13, -127, 47, 56, -60, 90, 8, -127, -27, -41, -120, 28, 127, 65, 10, -36, 98, 51, 99, 66, 28, -52, 101, 94, 73, -58, -46, -6, -111, -105, 75, -125, -116, -59, 0, 59, +15, -20, -42, -5, 127, -127, -61, 19, 60, -76, -63, -127, -73, 37, 66, 43, 17, 46, 31, 37, -47, 20, -58, 13, -106, 127, 77, -63, 91, -25, 23, 127, -70, -41, 104, -49, 11, 108, 114, 34, -82, -66, 115, -86, 0, +4, 123, -127, 76, 55, -74, 31, 7, -16, -6, 52, 33, -25, 94, -73, 48, 10, 13, -17, -14, -57, -127, 86, 83, 127, 87, -63, 31, 18, 51, -82, 6, 127, -102, -8, -127, 60, -9, -16, -4, -88, -47, 7, -50, -45, +97, -30, 16, 33, 57, 61, -3, 56, 95, 127, 41, -99, -86, -67, 124, 19, -46, 69, -14, -127, +}; + +const int32_t g_Weight9[] = { +45, 72, -10, -74, 10, -24, -7, -8, 96, -18, +}; + diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.h new file mode 100755 index 00000000000..7922af39f46 --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist/src/weight.h @@ -0,0 +1,36 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "CMSIS/NN/Include/arm_nnfunctions.h" +#include "nnacl/fp32/softmax_fp32.h" +#include "nnacl/int8/quant_dtype_cast_int8.h" +#include +#include +extern unsigned char *g_Buffer; +enum STATUS { + RET_OK = 0, + RET_ERROR = 1, +}; + +extern const int8_t g_Weight1[]; +extern const int32_t g_Weight2[]; +extern const int8_t g_Weight3[]; +extern const int32_t g_Weight4[]; +extern const int8_t g_Weight6[]; +extern const int32_t g_Weight7[]; +extern const int8_t g_Weight8[]; +extern const int32_t g_Weight9[];