Initial commit

Commit ce3dd83b9f: 1470 changed files with 1054449 additions and 0 deletions

26  Drivers/CMSIS/NN/Source/NNSupportFunctions/CMakeLists.txt  Normal file
@@ -0,0 +1,26 @@
#
# Copyright (c) 2019-2022 Arm Limited.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

file(GLOB SRC "./*_s8.c")
target_sources(cmsis-nn PRIVATE ${SRC} arm_q7_to_q15_with_offset.c
                                       arm_nn_mat_mul_kernel_s16.c
                                       arm_q7_to_q15_with_offset.c
                                       arm_nn_mat_mul_kernel_s16.c
                                       arm_nn_vec_mat_mult_t_s16.c
                                       arm_q7_to_q15_no_shift.c)
@@ -0,0 +1,85 @@
/*
 * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_nn_accumulate_q7_to_q15.c
 * Description:  Accumulate q7 vector into q15 one.
 *
 * $Date:        20. July 2021
 * $Revision:    V.1.1.2
 *
 * Target Processor: Cortex-M CPUs
 *
 * -------------------------------------------------------------------- */

#include "arm_nnfunctions.h"
#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupSupport
 */

/**
 * @addtogroup NNBasicMath
 * @{
 */

void arm_nn_accumulate_q7_to_q15(q15_t *pDst, const q7_t *pSrc, uint32_t length)
{
    q15_t *pCnt = pDst;
    const q7_t *pV = pSrc;
    int32_t count = length;
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
    q31_t v1, v2, vo1, vo2;
    count = length >> 2;
    q31_t in;

    while (count > 0l)
    {
        q31_t value = arm_nn_read_q7x4_ia(&pV);
        v1 = __SXTB16(__ROR((uint32_t)value, 8));
        v2 = __SXTB16(value);
#ifndef ARM_MATH_BIG_ENDIAN
        vo2 = (q31_t)__PKHTB(v1, v2, 16);
        vo1 = (q31_t)__PKHBT(v2, v1, 16);
#else
        vo1 = (q31_t)__PKHTB(v1, v2, 16);
        vo2 = (q31_t)__PKHBT(v2, v1, 16);
#endif

        in = arm_nn_read_q15x2(pCnt);
        arm_nn_write_q15x2_ia(&pCnt, __QADD16(vo1, in));

        in = arm_nn_read_q15x2(pCnt);
        arm_nn_write_q15x2_ia(&pCnt, __QADD16(vo2, in));

        count--;
    }
    count = length & 0x3;
#endif
    while (count > 0l)
    {
        *pCnt++ += *pV++;
        count--;
    }
}

/**
 * @} end of NNBasicMath group
 */
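To make the support function above concrete, here is a minimal usage sketch. It is not part of the commit: the wrapper name example_accumulate and the sample values are illustrative, and it assumes the CMSIS-NN headers from this tree are on the include path.

    #include "arm_nnsupportfunctions.h"

    /* Accumulate an 8-element q7 delta vector into a q15 accumulator buffer. */
    void example_accumulate(void)
    {
        q15_t acc_buf[8] = {100, 200, 300, 400, 500, 600, 700, 800};
        const q7_t delta[8] = {1, -2, 3, -4, 5, -6, 7, -8};

        /* After the call, acc_buf[i] holds its old value plus delta[i],
           e.g. acc_buf[0] == 101 and acc_buf[1] == 198. */
        arm_nn_accumulate_q7_to_q15(acc_buf, delta, 8);
    }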
82  Drivers/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c  Normal file

@@ -0,0 +1,82 @@
/*
 * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_nn_add_q7.c
 * Description:  Non saturating addition of elements of a q7 vector.
 *
 * $Date:        20. July 2021
 * $Revision:    V.1.1.1
 *
 * Target Processor: Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_nn_tables.h"
#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupSupport
 */

/**
 * @addtogroup NNBasicMath
 * @{
 */

void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size)
{
    uint32_t block_count;
    q31_t result = 0;
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
    /* Loop unrolling: Compute 4 outputs at a time */
    block_count = block_size >> 2U;

    while (block_count > 0U)
    {
        const int32_t mult_q15x2 = (1UL << 16) | 1UL;
        q31_t in_q7x4 = arm_nn_read_q7x4_ia(&input);
        q31_t temp_q15x2 = __SXTAB16(__SXTB16(in_q7x4), __ROR((uint32_t)in_q7x4, 8));

        result = __SMLAD(temp_q15x2, mult_q15x2, result);

        /* Decrement loop counter */
        block_count--;
    }

    /* Loop unrolling: Compute remaining outputs */
    block_count = block_size & 0x3;
#else
    block_count = block_size;
#endif
    while (block_count > 0U)
    {
        /* Add and store result in destination buffer. */
        result += *input++;

        /* Decrement loop counter */
        block_count--;
    }

    *output = result;
}

/**
 * @} end of NNBasicMath group
 */
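A similar illustrative sketch for arm_nn_add_q7, again not part of the commit; the wrapper name and values are hypothetical and the CMSIS-NN headers are assumed to be on the include path.

    #include "arm_nnsupportfunctions.h"

    /* Sum the elements of a q7 vector into a single q31 result. */
    void example_add_q7(void)
    {
        const q7_t input[5] = {10, -20, 30, -40, 50};
        q31_t sum = 0;

        arm_nn_add_q7(input, &sum, 5);
        /* sum is now 30 (10 - 20 + 30 - 40 + 50); the addition itself is
           non-saturating because the accumulator is a full 32-bit integer. */
    }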
|
@ -0,0 +1,168 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_depthwise_conv_nt_t_padded_s8.c
|
||||
* Description: Depthwise convolution with padded matrices.
|
||||
*
|
||||
* $Date: 09. October 2020
|
||||
* $Revision: V.1.0.2
|
||||
*
|
||||
* Target Processor: Cortex-M processors with MVE extension
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* Depthwise convolution of transposed rhs matrix with 4 lhs matrices. One or more of the rhs matrices are padded.
|
||||
* Dimensions are the same for lhs and rhs.
|
||||
*
|
||||
* Refer header file for details.
|
||||
*
|
||||
*/
|
||||
|
||||
q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs,
|
||||
const q7_t *rhs,
|
||||
const int32_t input_offset,
|
||||
const uint16_t num_ch,
|
||||
const int32_t *out_shift,
|
||||
const int32_t *out_mult,
|
||||
const int32_t out_offset,
|
||||
const int32_t activation_min,
|
||||
const int32_t activation_max,
|
||||
const uint16_t row_x_col,
|
||||
const int32_t *const output_bias,
|
||||
q7_t *out)
|
||||
{
|
||||
#if defined(ARM_MATH_MVEI)
|
||||
int32_t loop_count = (num_ch + 3) / 4;
|
||||
const int32_t *bias = output_bias;
|
||||
uint32_t num_ch_to_process = num_ch;
|
||||
|
||||
for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count;
|
||||
num_ch_to_process -= 4, out += 4, offset += 4, i_loop_cnt++)
|
||||
{
|
||||
int32x4_t out_0 = vldrwq_s32(bias);
|
||||
int32x4_t out_1 = out_0;
|
||||
int32x4_t out_2 = out_0;
|
||||
int32x4_t out_3 = out_0;
|
||||
bias += 4;
|
||||
|
||||
const int8_t *rhs_0 = rhs + offset;
|
||||
const int8_t *lhs_0 = lhs + offset;
|
||||
const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset;
|
||||
const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset;
|
||||
const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset;
|
||||
|
||||
for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++)
|
||||
{
|
||||
const int32x4_t ker_0 = vldrbq_s32(rhs_0);
|
||||
|
||||
int32x4_t ip_0 = vldrbq_s32(lhs_0);
|
||||
ip_0 = vaddq_n_s32(ip_0, input_offset);
|
||||
out_0 += vmulq_s32(ip_0, ker_0);
|
||||
|
||||
int32x4_t ip_1 = vldrbq_s32(lhs_1);
|
||||
ip_1 = vaddq_n_s32(ip_1, input_offset);
|
||||
out_1 += vmulq_s32(ip_1, ker_0);
|
||||
|
||||
int32x4_t ip_2 = vldrbq_s32(lhs_2);
|
||||
ip_2 = vaddq_n_s32(ip_2, input_offset);
|
||||
out_2 += vmulq_s32(ip_2, ker_0);
|
||||
|
||||
int32x4_t ip_3 = vldrbq_s32(lhs_3);
|
||||
ip_3 = vaddq_n_s32(ip_3, input_offset);
|
||||
|
||||
out_3 += vmulq_s32(ip_3, ker_0);
|
||||
|
||||
lhs_0 += num_ch;
|
||||
lhs_1 += num_ch;
|
||||
lhs_2 += num_ch;
|
||||
lhs_3 += num_ch;
|
||||
|
||||
rhs_0 += num_ch;
|
||||
}
|
||||
|
||||
const int32x4_t mult = vldrwq_s32(out_mult);
|
||||
const int32x4_t shift = vldrwq_s32(out_shift);
|
||||
out_mult += 4;
|
||||
out_shift += 4;
|
||||
|
||||
out_0 = arm_requantize_mve_32x4(out_0, mult, shift);
|
||||
out_0 = vaddq_n_s32(out_0, out_offset);
|
||||
out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min));
|
||||
out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max));
|
||||
mve_pred16_t p = vctp32q(num_ch_to_process);
|
||||
vstrbq_p_s32(out, out_0, p);
|
||||
|
||||
out_1 = arm_requantize_mve_32x4(out_1, mult, shift);
|
||||
out_1 = vaddq_n_s32(out_1, out_offset);
|
||||
out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min));
|
||||
out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out + num_ch, out_1, p);
|
||||
|
||||
out_2 = arm_requantize_mve_32x4(out_2, mult, shift);
|
||||
out_2 = vaddq_n_s32(out_2, out_offset);
|
||||
out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min));
|
||||
out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out + 2 * num_ch, out_2, p);
|
||||
|
||||
out_3 = arm_requantize_mve_32x4(out_3, mult, shift);
|
||||
out_3 = vaddq_n_s32(out_3, out_offset);
|
||||
out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min));
|
||||
out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out + 3 * num_ch, out_3, p);
|
||||
}
|
||||
|
||||
const int tail_ch = num_ch & 0x3;
|
||||
if (tail_ch != 0)
|
||||
{
|
||||
out -= (4 - tail_ch);
|
||||
}
|
||||
return out + (3 * num_ch);
|
||||
|
||||
#else
|
||||
(void)lhs;
|
||||
(void)rhs;
|
||||
(void)input_offset;
|
||||
(void)num_ch;
|
||||
(void)out_shift;
|
||||
(void)out_mult;
|
||||
(void)out_offset;
|
||||
(void)activation_min;
|
||||
(void)activation_max;
|
||||
(void)row_x_col;
|
||||
(void)output_bias;
|
||||
(void)out;
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
|
@ -0,0 +1,170 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_depthwise_conv_nt_t_s8.c
|
||||
* Description: Depthwise convolution on matrices with no padding.
|
||||
*
|
||||
* $Date: 09. October 2020
|
||||
* $Revision: V.1.0.2
|
||||
*
|
||||
* Target Processor: Cortex-M processors with MVE extension.
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* Depthwise convolution of rhs matrix with 4 lhs matrices with no padding. Dimensions are the same for lhs and rhs.
|
||||
*
|
||||
* Refer header file for details.
|
||||
*
|
||||
*/
|
||||
|
||||
q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs,
|
||||
const q7_t *rhs,
|
||||
const int32_t input_offset,
|
||||
const uint16_t num_ch,
|
||||
const int32_t *out_shift,
|
||||
const int32_t *out_mult,
|
||||
const int32_t out_offset,
|
||||
const int32_t activation_min,
|
||||
const int32_t activation_max,
|
||||
const uint16_t row_x_col,
|
||||
const int32_t *const output_bias,
|
||||
q7_t *out)
|
||||
{
|
||||
#if defined(ARM_MATH_MVEI)
|
||||
const int32_t *bias = output_bias;
|
||||
int32_t loop_count = (num_ch + 3) / 4;
|
||||
uint32_t num_ch_to_process = num_ch;
|
||||
|
||||
for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count;
|
||||
num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++)
|
||||
{
|
||||
int32x4_t out_0 = vldrwq_s32(bias);
|
||||
int32x4_t out_1 = out_0;
|
||||
int32x4_t out_2 = out_0;
|
||||
int32x4_t out_3 = out_0;
|
||||
bias += 4;
|
||||
|
||||
const int8_t *rhs_0 = rhs + offset;
|
||||
const int8_t *lhs_0 = lhs + offset;
|
||||
const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset;
|
||||
const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset;
|
||||
const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset;
|
||||
int32x4_t ker_sum = vdupq_n_s32(0);
|
||||
|
||||
for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++)
|
||||
{
|
||||
const int32x4_t ker_0 = vldrbq_s32(rhs_0);
|
||||
ker_sum = vaddq_s32(ker_sum, ker_0);
|
||||
|
||||
int32x4_t ip_0 = vldrbq_s32(lhs_0);
|
||||
out_0 += vmulq_s32(ip_0, ker_0);
|
||||
|
||||
int32x4_t ip_1 = vldrbq_s32(lhs_1);
|
||||
out_1 += vmulq_s32(ip_1, ker_0);
|
||||
|
||||
int32x4_t ip_2 = vldrbq_s32(lhs_2);
|
||||
out_2 += vmulq_s32(ip_2, ker_0);
|
||||
|
||||
int32x4_t ip_3 = vldrbq_s32(lhs_3);
|
||||
out_3 += vmulq_s32(ip_3, ker_0);
|
||||
|
||||
lhs_0 += num_ch;
|
||||
lhs_1 += num_ch;
|
||||
lhs_2 += num_ch;
|
||||
lhs_3 += num_ch;
|
||||
|
||||
rhs_0 += num_ch;
|
||||
}
|
||||
|
||||
ker_sum = vmulq_n_s32(ker_sum, input_offset);
|
||||
out_0 = ker_sum + out_0;
|
||||
out_1 = ker_sum + out_1;
|
||||
out_2 = ker_sum + out_2;
|
||||
out_3 = ker_sum + out_3;
|
||||
|
||||
const int32x4_t mult = vldrwq_s32(out_mult);
|
||||
const int32x4_t shift = vldrwq_s32(out_shift);
|
||||
out_mult += 4;
|
||||
out_shift += 4;
|
||||
mve_pred16_t p = vctp32q(num_ch_to_process);
|
||||
|
||||
out_0 = arm_requantize_mve_32x4(out_0, mult, shift);
|
||||
out_0 = vaddq_n_s32(out_0, out_offset);
|
||||
out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min));
|
||||
out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out, out_0, p);
|
||||
|
||||
out_1 = arm_requantize_mve_32x4(out_1, mult, shift);
|
||||
out_1 = vaddq_n_s32(out_1, out_offset);
|
||||
out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min));
|
||||
out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out + num_ch, out_1, p);
|
||||
|
||||
out_2 = arm_requantize_mve_32x4(out_2, mult, shift);
|
||||
out_2 = vaddq_n_s32(out_2, out_offset);
|
||||
out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min));
|
||||
out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out + 2 * num_ch, out_2, p);
|
||||
|
||||
out_3 = arm_requantize_mve_32x4(out_3, mult, shift);
|
||||
out_3 = vaddq_n_s32(out_3, out_offset);
|
||||
out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min));
|
||||
out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max));
|
||||
vstrbq_p_s32(out + 3 * num_ch, out_3, p);
|
||||
}
|
||||
|
||||
const int tail_ch = num_ch & 0x3;
|
||||
if (tail_ch != 0)
|
||||
{
|
||||
out -= (4 - tail_ch);
|
||||
}
|
||||
|
||||
return out + (3 * num_ch);
|
||||
#else
|
||||
(void)lhs;
|
||||
(void)rhs;
|
||||
(void)input_offset;
|
||||
(void)num_ch;
|
||||
(void)out_shift;
|
||||
(void)out_mult;
|
||||
(void)out_offset;
|
||||
(void)activation_min;
|
||||
(void)activation_max;
|
||||
(void)row_x_col;
|
||||
(void)output_bias;
|
||||
(void)out;
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
@@ -0,0 +1,86 @@
/*
 * Copyright (C) 2010-2022 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_nn_mat_mul_core_1x_s8.c
 * Description:  General Matrix-multiplication function
 *
 * $Date:        19. April 2022
 * $Revision:    V.1.0.3
 *
 * Target Processor: Cortex-M cores
 * -------------------------------------------------------------------- */

#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupSupport
 */

/**
 * @addtogroup NNBasicMath
 * @{
 */

/*
 * s8 matrix multiplication to process 1 row
 *
 * Refer header file for details.
 *
 */

arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements,
                                     const int8_t *row_base,
                                     const int8_t *col_base,
                                     int32_t *const sum_col,
                                     int32_t *const output)
{
    int32_t acc_n0 = 0;
    int32_t sum_tmp = 0;

#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)

    __ASM volatile(" vldrb.8        q0, [%[col]], #16    \n"
                   " wlstp.8        lr, %[cnt], 1f       \n"
                   "2:                                   \n"
                   " vaddva.s8      %[sum], q0           \n"
                   " vldrb.8        q1, [%[row0]], #16   \n"
                   " vmladava.s8    %[out0], q0, q1      \n"
                   " vldrb.8        q0, [%[col]], #16    \n"
                   " letp           lr, 2b               \n"
                   "1:                                   \n"
                   : [col] "+r"(col_base), [sum] "+Te"(sum_tmp), [row0] "+r"(row_base), [out0] "+Te"(acc_n0)
                   : [cnt] "r"(row_elements)
                   : "q0", "q1", "memory", "r14");
#else
    for (int i = 0; i < row_elements; i++)
    {
        sum_tmp += col_base[i];
        acc_n0 += row_base[i] * col_base[i];
    }
#endif

    *sum_col = sum_tmp;
    *output = acc_n0;
    return ARM_MATH_SUCCESS;
}

/**
 * @} end of NNBasicMath group
 */
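As an illustration of the function above (the MVE build takes the assembly branch, the scalar fallback computes the same result), a small hypothetical call site; the wrapper name and sample data are not part of the commit and the CMSIS-NN headers are assumed to be available.

    #include "arm_nnsupportfunctions.h"

    /* Dot product of one s8 row with one s8 column, plus the column sum
       (the column sum is typically reused for the input-offset correction). */
    void example_mat_mul_core_1x(void)
    {
        const int8_t row[4] = {1, 2, 3, 4};
        const int8_t col[4] = {5, 6, 7, 8};
        int32_t col_sum = 0;
        int32_t dot = 0;

        (void)arm_nn_mat_mul_core_1x_s8(4, row, col, &col_sum, &dot);
        /* dot == 1*5 + 2*6 + 3*7 + 4*8 == 70, col_sum == 5 + 6 + 7 + 8 == 26 */
    }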
|
@ -0,0 +1,137 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2022 Arm Limited or its affiliates.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_mat_mul_core_4x_s8.c
|
||||
* Description: General matrix multiplication function for MVE extension
|
||||
*
|
||||
* $Date: 19. April 2022
|
||||
* $Revision: V.3.0.1
|
||||
*
|
||||
* Target Processor: Cortex-M processors
|
||||
* -------------------------------------------------------------------- */
|
||||
#include "arm_nn_types.h"
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* s8 matrix multiplication to process 4 rows and one column
|
||||
*
|
||||
* Refer header file for details.
|
||||
*
|
||||
*/
|
||||
|
||||
int8_t *arm_nn_mat_mul_core_4x_s8(const int32_t row_elements,
|
||||
const int32_t offset,
|
||||
const int8_t *row_base,
|
||||
const int8_t *col_base_ref,
|
||||
const int32_t out_ch,
|
||||
const cmsis_nn_conv_params *conv_params,
|
||||
const cmsis_nn_per_channel_quant_params *quant_params,
|
||||
const int32_t *bias,
|
||||
int8_t *output)
|
||||
{
|
||||
|
||||
#if defined(ARM_MATH_MVEI)
|
||||
for (int i = 0; i < out_ch; i++)
|
||||
{
|
||||
int32_t acc_n0 = 0;
|
||||
int32_t acc_n1 = 0;
|
||||
int32_t acc_n2 = 0;
|
||||
int32_t acc_n3 = 0;
|
||||
|
||||
const int8_t *ip_row_0 = row_base;
|
||||
const int8_t *ip_row_1 = row_base + offset;
|
||||
const int8_t *ip_row_2 = row_base + (2 * offset);
|
||||
const int8_t *ip_row_3 = row_base + (3 * offset);
|
||||
const int8_t *col_base = col_base_ref + i * row_elements;
|
||||
int32_t sum_tmp = 0;
|
||||
|
||||
__ASM volatile(" vldrb.8 q0, [%[col]], #16 \n"
|
||||
" wlstp.8 lr, %[cnt], 1f \n"
|
||||
"2: \n"
|
||||
" vaddva.s8 %[sum], q0 \n"
|
||||
" vldrb.8 q1, [%[row0]], #16 \n"
|
||||
" vmladava.s8 %[out0], q0, q1 \n"
|
||||
" vldrb.8 q2, [%[row1]], #16 \n"
|
||||
" vmladava.s8 %[out1], q0, q2 \n"
|
||||
" vldrb.8 q3, [%[row2]], #16 \n"
|
||||
" vmladava.s8 %[out2], q0, q3 \n"
|
||||
" vldrb.8 q4, [%[row3]], #16 \n"
|
||||
" vmladava.s8 %[out3], q0, q4 \n"
|
||||
" vldrb.8 q0, [%[col]], #16 \n"
|
||||
" letp lr, 2b \n"
|
||||
"1: \n"
|
||||
: [col] "+r"(col_base),
|
||||
[sum] "+Te"(sum_tmp),
|
||||
[row0] "+r"(ip_row_0),
|
||||
[row1] "+r"(ip_row_1),
|
||||
[row2] "+r"(ip_row_2),
|
||||
[row3] "+r"(ip_row_3),
|
||||
[out0] "+Te"(acc_n0),
|
||||
[out1] "+Te"(acc_n1),
|
||||
[out2] "+Te"(acc_n2),
|
||||
[out3] "+Te"(acc_n3)
|
||||
: [cnt] "r"(row_elements)
|
||||
: "q0", "q1", "q2", "q3", "q4", "memory", "r14");
|
||||
|
||||
int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3};
|
||||
sum_tmp *= conv_params->input_offset;
|
||||
if (bias)
|
||||
{
|
||||
sum_tmp += bias[i];
|
||||
}
|
||||
res = vaddq_n_s32(res, sum_tmp);
|
||||
|
||||
res = arm_requantize_mve(res, quant_params->multiplier[i], quant_params->shift[i]);
|
||||
res = vaddq_n_s32(res, conv_params->output_offset);
|
||||
|
||||
res = vmaxq_s32(res, vdupq_n_s32(conv_params->activation.min));
|
||||
res = vminq_s32(res, vdupq_n_s32(conv_params->activation.max));
|
||||
|
||||
const uint32x4_t scatter_offset = {0, (uint32_t)out_ch, (uint32_t)out_ch * 2, (uint32_t)out_ch * 3};
|
||||
vstrbq_scatter_offset_s32(output, scatter_offset, res);
|
||||
output++;
|
||||
}
|
||||
|
||||
return output + (3 * out_ch);
|
||||
#else
|
||||
(void)row_elements;
|
||||
(void)offset;
|
||||
(void)row_base;
|
||||
(void)col_base_ref;
|
||||
(void)out_ch;
|
||||
(void)conv_params;
|
||||
(void)quant_params;
|
||||
(void)bias;
|
||||
(void)output;
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
|
@ -0,0 +1,250 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_mat_mult_kernel_s16.c
|
||||
* Description: Matrix-multiplication function for convolution
|
||||
*
|
||||
* $Date: 12 August 2021
|
||||
* $Revision: V.1.1.0
|
||||
*
|
||||
* Target Processor: Cortex-M cores
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnfunctions.h"
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/*
|
||||
* Matrix-multiplication function for convolution with per-channel requantization.
|
||||
*
|
||||
* Refer header file for details.
|
||||
*
|
||||
*/
|
||||
|
||||
q15_t *arm_nn_mat_mult_kernel_s16(const q7_t *input_a,
|
||||
const q15_t *input_b,
|
||||
const int32_t output_ch,
|
||||
const int32_t *out_shift,
|
||||
const int32_t *out_mult,
|
||||
const int16_t activation_min,
|
||||
const int16_t activation_max,
|
||||
const int32_t num_col_a,
|
||||
const int64_t *const output_bias,
|
||||
q15_t *out_0)
|
||||
{
|
||||
|
||||
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
|
||||
/* set up the second output pointers */
|
||||
q15_t *out_1 = out_0 + output_ch;
|
||||
const int64_t *bias = output_bias;
|
||||
uint16_t row_count = output_ch / 2;
|
||||
const q7_t *ip_a0 = input_a;
|
||||
|
||||
/* this loop over rows in A */
|
||||
while (row_count)
|
||||
{
|
||||
/* setup pointers for B */
|
||||
const q15_t *ip_b0 = input_b;
|
||||
const q15_t *ip_b1 = ip_b0 + num_col_a;
|
||||
|
||||
/* align the second pointer for A */
|
||||
const q7_t *ip_a1 = ip_a0 + num_col_a;
|
||||
|
||||
/* Init accumulator for channel N and N + 1 */
|
||||
q31_t ch_0_out_0 = 0;
|
||||
q31_t ch_0_out_1 = 0;
|
||||
q31_t ch_1_out_0 = 0;
|
||||
q31_t ch_1_out_1 = 0;
|
||||
|
||||
uint16_t col_count = num_col_a / 4;
|
||||
/* accumulate over the vector */
|
||||
while (col_count)
|
||||
{
|
||||
q31_t a01, a02, a11, a12;
|
||||
q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0);
|
||||
q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1);
|
||||
|
||||
ip_a0 = read_and_pad(ip_a0, &a01, &a02);
|
||||
ip_a1 = read_and_pad(ip_a1, &a11, &a12);
|
||||
|
||||
ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0);
|
||||
ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1);
|
||||
ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0);
|
||||
ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1);
|
||||
|
||||
b0 = arm_nn_read_q15x2_ia(&ip_b0);
|
||||
b1 = arm_nn_read_q15x2_ia(&ip_b1);
|
||||
|
||||
ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0);
|
||||
ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1);
|
||||
ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0);
|
||||
ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1);
|
||||
|
||||
col_count--;
|
||||
} /* while over col_count */
|
||||
col_count = num_col_a & 0x3;
|
||||
while (col_count)
|
||||
{
|
||||
q7_t a0 = *ip_a0++;
|
||||
q15_t b0 = *ip_b0++;
|
||||
q7_t a1 = *ip_a1++;
|
||||
q15_t b1 = *ip_b1++;
|
||||
|
||||
ch_0_out_0 += a0 * b0;
|
||||
ch_0_out_1 += a0 * b1;
|
||||
ch_1_out_0 += a1 * b0;
|
||||
ch_1_out_1 += a1 * b1;
|
||||
col_count--;
|
||||
} /* while over col_count */
|
||||
if (bias)
|
||||
{
|
||||
q31_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult);
|
||||
q63_t acc_64 = ch_0_out_0 + *bias;
|
||||
ch_0_out_0 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift);
|
||||
acc_64 = ch_0_out_1 + *bias++;
|
||||
ch_0_out_1 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift);
|
||||
out_mult++;
|
||||
}
|
||||
else
|
||||
{
|
||||
ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift);
|
||||
ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift);
|
||||
out_mult++;
|
||||
}
|
||||
ch_0_out_0 = MAX(ch_0_out_0, activation_min);
|
||||
ch_0_out_0 = MIN(ch_0_out_0, activation_max);
|
||||
*out_0++ = (q15_t)ch_0_out_0;
|
||||
|
||||
ch_0_out_1 = MAX(ch_0_out_1, activation_min);
|
||||
ch_0_out_1 = MIN(ch_0_out_1, activation_max);
|
||||
*out_1++ = (q15_t)ch_0_out_1;
|
||||
out_shift++;
|
||||
|
||||
if (bias)
|
||||
{
|
||||
q31_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult);
|
||||
q63_t acc_64 = ch_1_out_0 + *bias;
|
||||
ch_1_out_0 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift);
|
||||
acc_64 = ch_1_out_1 + *bias++;
|
||||
ch_1_out_1 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift);
|
||||
out_mult++;
|
||||
}
|
||||
else
|
||||
{
|
||||
ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift);
|
||||
ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift);
|
||||
out_mult++;
|
||||
}
|
||||
ch_1_out_0 = MAX(ch_1_out_0, activation_min);
|
||||
ch_1_out_0 = MIN(ch_1_out_0, activation_max);
|
||||
*out_0++ = (q15_t)ch_1_out_0;
|
||||
|
||||
ch_1_out_1 = MAX(ch_1_out_1, activation_min);
|
||||
ch_1_out_1 = MIN(ch_1_out_1, activation_max);
|
||||
*out_1++ = (q15_t)ch_1_out_1;
|
||||
out_shift++;
|
||||
|
||||
/* skip row */
|
||||
ip_a0 += num_col_a;
|
||||
row_count--;
|
||||
}
|
||||
|
||||
/* compute the last odd numbered row if any */
|
||||
if (output_ch & 0x1)
|
||||
{
|
||||
/* setup pointers for B */
|
||||
const q15_t *ip_b0 = input_b;
|
||||
const q15_t *ip_b1 = ip_b0 + num_col_a;
|
||||
|
||||
q31_t ch_0_out_0 = 0;
|
||||
q31_t ch_0_out_1 = 0;
|
||||
|
||||
uint16_t col_count = num_col_a >> 2;
|
||||
while (col_count)
|
||||
{
|
||||
q31_t a01, a02;
|
||||
q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0);
|
||||
q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1);
|
||||
|
||||
ip_a0 = read_and_pad(ip_a0, &a01, &a02);
|
||||
|
||||
ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0);
|
||||
ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1);
|
||||
|
||||
b0 = arm_nn_read_q15x2_ia(&ip_b0);
|
||||
b1 = arm_nn_read_q15x2_ia(&ip_b1);
|
||||
ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0);
|
||||
ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1);
|
||||
|
||||
col_count--;
|
||||
}
|
||||
col_count = num_col_a & 0x3;
|
||||
while (col_count)
|
||||
{
|
||||
q7_t a0 = *ip_a0++;
|
||||
q15_t b0 = *ip_b0++;
|
||||
q15_t b1 = *ip_b1++;
|
||||
|
||||
ch_0_out_0 += a0 * b0;
|
||||
ch_0_out_1 += a0 * b1;
|
||||
col_count--;
|
||||
}
|
||||
if (bias)
|
||||
{
|
||||
q31_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult);
|
||||
q63_t acc_64 = ch_0_out_0 + *bias;
|
||||
ch_0_out_0 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift);
|
||||
acc_64 = ch_0_out_1 + *bias++;
|
||||
ch_0_out_1 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift);
|
||||
}
|
||||
else
|
||||
{
|
||||
ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift);
|
||||
ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift);
|
||||
}
|
||||
ch_0_out_0 = MAX(ch_0_out_0, activation_min);
|
||||
ch_0_out_0 = MIN(ch_0_out_0, activation_max);
|
||||
*out_0++ = (q15_t)ch_0_out_0;
|
||||
|
||||
ch_0_out_1 = MAX(ch_0_out_1, activation_min);
|
||||
ch_0_out_1 = MIN(ch_0_out_1, activation_max);
|
||||
*out_1++ = (q15_t)ch_0_out_1;
|
||||
out_mult++;
|
||||
out_shift++;
|
||||
}
|
||||
|
||||
out_0 += output_ch;
|
||||
|
||||
/* return the new output pointer with offset */
|
||||
return out_0;
|
||||
#else
|
||||
(void)input_a;
|
||||
(void)input_b;
|
||||
(void)output_ch;
|
||||
(void)out_shift;
|
||||
(void)out_mult;
|
||||
(void)activation_min;
|
||||
(void)activation_max;
|
||||
(void)num_col_a;
|
||||
(void)output_bias;
|
||||
(void)out_0;
|
||||
/* To be completed */
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
|
@ -0,0 +1,582 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_mat_mult_s8_nt_t_s8
|
||||
* Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed
|
||||
*
|
||||
* $Date: 09. October 2020
|
||||
* $Revision: V.1.0.3
|
||||
*
|
||||
* Target Processor: Cortex-M
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* s8 matrix multiplication with the right-hand-side matrix transposed
|
||||
*
|
||||
* Refer header file for details.
|
||||
*
|
||||
*/
|
||||
arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs,
|
||||
const q7_t *rhs,
|
||||
const q31_t *bias,
|
||||
q7_t *dst,
|
||||
const int32_t *dst_multipliers,
|
||||
const int32_t *dst_shifts,
|
||||
const int32_t lhs_rows,
|
||||
const int32_t rhs_rows,
|
||||
const int32_t rhs_cols,
|
||||
const int32_t lhs_offset,
|
||||
const int32_t dst_offset,
|
||||
const int32_t activation_min,
|
||||
const int32_t activation_max)
|
||||
{
|
||||
#if defined(ARM_MATH_DSP)
|
||||
const int32_t off0 = rhs_cols - 4;
|
||||
|
||||
for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2)
|
||||
{
|
||||
const q7_t *lhs_ptr = &lhs[0];
|
||||
q7_t *dst_ptr = &dst[0];
|
||||
|
||||
q31_t lhs_offset_contribution0 = 0;
|
||||
q31_t lhs_offset_contribution1 = 0;
|
||||
|
||||
for (int32_t x = 0; x < rhs_cols; ++x)
|
||||
{
|
||||
lhs_offset_contribution0 += rhs[x];
|
||||
lhs_offset_contribution1 += rhs[x + rhs_cols];
|
||||
}
|
||||
|
||||
lhs_offset_contribution0 *= lhs_offset;
|
||||
lhs_offset_contribution1 *= lhs_offset;
|
||||
if (bias)
|
||||
{
|
||||
lhs_offset_contribution0 += bias[rhs_rows_idx];
|
||||
lhs_offset_contribution1 += bias[rhs_rows_idx + 1];
|
||||
}
|
||||
|
||||
int32_t lhs_rows_idx = lhs_rows >> 1;
|
||||
|
||||
while (lhs_rows_idx)
|
||||
{
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
|
||||
q31_t res00 = lhs_offset_contribution0;
|
||||
q31_t res01 = lhs_offset_contribution1;
|
||||
q31_t res10 = lhs_offset_contribution0;
|
||||
q31_t res11 = lhs_offset_contribution1;
|
||||
|
||||
int32_t rhs_cols_idx = 0;
|
||||
|
||||
q31_t val0, val1, val2, val3, val4, val5;
|
||||
|
||||
for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16)
|
||||
{
|
||||
val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
val2 = __SXTB16(val1);
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val3, val2, res00);
|
||||
val5 = __SXTB16(val4);
|
||||
res00 = __SMLAD(val0, val1, res00);
|
||||
val4 = __SXTB16_RORn(val4, 8);
|
||||
res01 = __SMLAD(val3, val5, res01);
|
||||
res01 = __SMLAD(val0, val4, res01);
|
||||
|
||||
// 4 x MAC res10, res11
|
||||
val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]);
|
||||
val3 = __SXTB16(val0);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
res10 = __SMLAD(val3, val2, res10);
|
||||
res11 = __SMLAD(val3, val5, res11);
|
||||
res10 = __SMLAD(val0, val1, res10);
|
||||
val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
res11 = __SMLAD(val0, val4, res11);
|
||||
|
||||
val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = __SXTB16(val1);
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val3, val2, res00);
|
||||
val5 = __SXTB16(val4);
|
||||
res00 = __SMLAD(val0, val1, res00);
|
||||
val4 = __SXTB16_RORn(val4, 8);
|
||||
res01 = __SMLAD(val3, val5, res01);
|
||||
res01 = __SMLAD(val0, val4, res01);
|
||||
|
||||
// 4 x MAC res10, res11
|
||||
val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]);
|
||||
val3 = __SXTB16(val0);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
res10 = __SMLAD(val3, val2, res10);
|
||||
res11 = __SMLAD(val3, val5, res11);
|
||||
res10 = __SMLAD(val0, val1, res10);
|
||||
val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
res11 = __SMLAD(val0, val4, res11);
|
||||
|
||||
val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = __SXTB16(val1);
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val3, val2, res00);
|
||||
val5 = __SXTB16(val4);
|
||||
res00 = __SMLAD(val0, val1, res00);
|
||||
val4 = __SXTB16_RORn(val4, 8);
|
||||
res01 = __SMLAD(val3, val5, res01);
|
||||
res01 = __SMLAD(val0, val4, res01);
|
||||
|
||||
// 4 x MAC res10, res11
|
||||
val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]);
|
||||
val3 = __SXTB16(val0);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
res10 = __SMLAD(val3, val2, res10);
|
||||
res11 = __SMLAD(val3, val5, res11);
|
||||
res10 = __SMLAD(val0, val1, res10);
|
||||
val1 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
res11 = __SMLAD(val0, val4, res11);
|
||||
|
||||
val4 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = __SXTB16(val1);
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val3, val2, res00);
|
||||
val5 = __SXTB16(val4);
|
||||
res00 = __SMLAD(val0, val1, res00);
|
||||
val4 = __SXTB16_RORn(val4, 8);
|
||||
res01 = __SMLAD(val3, val5, res01);
|
||||
res01 = __SMLAD(val0, val4, res01);
|
||||
|
||||
// 4 x MAC res10, res11
|
||||
val0 = arm_nn_read_q7x4((const q7_t *)&lhs_ptr[off0]);
|
||||
val3 = __SXTB16(val0);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
res10 = __SMLAD(val3, val2, res10);
|
||||
res11 = __SMLAD(val3, val5, res11);
|
||||
res10 = __SMLAD(val0, val1, res10);
|
||||
res11 = __SMLAD(val0, val4, res11);
|
||||
}
|
||||
|
||||
for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
q7_t rhs_value0 = rhs_ptr[0];
|
||||
q7_t rhs_value1 = rhs_ptr[rhs_cols];
|
||||
q7_t lhs_value = lhs_ptr[0];
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
res01 += lhs_value * rhs_value1;
|
||||
|
||||
lhs_value = lhs_ptr[rhs_cols];
|
||||
res10 += lhs_value * rhs_value0;
|
||||
res11 += lhs_value * rhs_value1;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]);
|
||||
res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]);
|
||||
res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]);
|
||||
res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
res01 += dst_offset;
|
||||
res10 += dst_offset;
|
||||
res11 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
res01 = MAX(res01, activation_min);
|
||||
res01 = MIN(res01, activation_max);
|
||||
res10 = MAX(res10, activation_min);
|
||||
res10 = MIN(res10, activation_max);
|
||||
res11 = MAX(res11, activation_min);
|
||||
res11 = MIN(res11, activation_max);
|
||||
|
||||
dst_ptr[0] = (q7_t)res00;
|
||||
dst_ptr[1] = (q7_t)res01;
|
||||
dst_ptr += rhs_rows;
|
||||
dst_ptr[0] = (q7_t)res10;
|
||||
dst_ptr[1] = (q7_t)res11;
|
||||
dst_ptr += rhs_rows;
|
||||
|
||||
lhs_ptr += rhs_cols;
|
||||
|
||||
lhs_rows_idx--;
|
||||
}
|
||||
|
||||
// Left-over rows
|
||||
if (lhs_rows % 2)
|
||||
{
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
|
||||
q31_t res00 = lhs_offset_contribution0;
|
||||
q31_t res01 = lhs_offset_contribution1;
|
||||
|
||||
int32_t rhs_cols_idx = 0;
|
||||
|
||||
q31_t val0, val1, val2, val3, val4, val5;
|
||||
for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16)
|
||||
{
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val5 = __SXTB16(val2);
|
||||
val4 = __SXTB16(val1);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
val2 = __SXTB16_RORn(val2, 8);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val5, val3, res00);
|
||||
res00 = __SMLAD(val2, val0, res00);
|
||||
res01 = __SMLAD(val5, val4, res01);
|
||||
res01 = __SMLAD(val2, val1, res01);
|
||||
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val5 = __SXTB16(val2);
|
||||
val4 = __SXTB16(val1);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
val2 = __SXTB16_RORn(val2, 8);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val5, val3, res00);
|
||||
res00 = __SMLAD(val2, val0, res00);
|
||||
res01 = __SMLAD(val5, val4, res01);
|
||||
res01 = __SMLAD(val2, val1, res01);
|
||||
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val5 = __SXTB16(val2);
|
||||
val4 = __SXTB16(val1);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
val2 = __SXTB16_RORn(val2, 8);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val5, val3, res00);
|
||||
res00 = __SMLAD(val2, val0, res00);
|
||||
res01 = __SMLAD(val5, val4, res01);
|
||||
res01 = __SMLAD(val2, val1, res01);
|
||||
|
||||
val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr);
|
||||
val1 = arm_nn_read_q7x4((const q7_t *)&rhs_ptr[off0]);
|
||||
val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr);
|
||||
val3 = __SXTB16(val0);
|
||||
val5 = __SXTB16(val2);
|
||||
val4 = __SXTB16(val1);
|
||||
val0 = __SXTB16_RORn(val0, 8);
|
||||
val2 = __SXTB16_RORn(val2, 8);
|
||||
val1 = __SXTB16_RORn(val1, 8);
|
||||
|
||||
// 4 x MAC res00, res01
|
||||
res00 = __SMLAD(val5, val3, res00);
|
||||
res00 = __SMLAD(val2, val0, res00);
|
||||
res01 = __SMLAD(val5, val4, res01);
|
||||
res01 = __SMLAD(val2, val1, res01);
|
||||
}
|
||||
|
||||
// Left-over accumulations
|
||||
for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
q7_t rhs_value0 = rhs_ptr[0];
|
||||
q7_t rhs_value1 = rhs_ptr[rhs_cols];
|
||||
q7_t lhs_value = lhs_ptr[0];
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
res01 += lhs_value * rhs_value1;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]);
|
||||
res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
res01 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
res01 = MAX(res01, activation_min);
|
||||
res01 = MIN(res01, activation_max);
|
||||
|
||||
dst_ptr[0] = (q7_t)res00;
|
||||
dst_ptr[1] = (q7_t)res01;
|
||||
}
|
||||
|
||||
rhs += 2 * rhs_cols;
|
||||
dst += 2;
|
||||
}
|
||||
|
||||
if (rhs_rows % 2)
|
||||
{
|
||||
const q7_t *lhs_ptr = &lhs[0];
|
||||
q7_t *dst_ptr = &dst[0];
|
||||
|
||||
for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx)
|
||||
{
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
q31_t res00 = 0;
|
||||
if (bias)
|
||||
{
|
||||
res00 = bias[rhs_rows - 1];
|
||||
}
|
||||
|
||||
for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
q31_t rhs_value = rhs_ptr[0];
|
||||
q31_t lhs_value = lhs_ptr[0] + lhs_offset;
|
||||
|
||||
res00 += lhs_value * rhs_value;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
|
||||
dst_ptr[0] = (q7_t)res00;
|
||||
dst_ptr += rhs_rows;
|
||||
}
|
||||
}
|
||||
#else
|
||||
for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2)
|
||||
{
|
||||
const q7_t *lhs_ptr = &lhs[0];
|
||||
q7_t *dst_ptr = &dst[0];
|
||||
|
||||
q31_t lhs_offset_contribution0 = 0;
|
||||
q31_t lhs_offset_contribution1 = 0;
|
||||
|
||||
for (int32_t x = 0; x < rhs_cols; ++x)
|
||||
{
|
||||
lhs_offset_contribution0 += rhs[x];
|
||||
lhs_offset_contribution1 += rhs[x + rhs_cols];
|
||||
}
|
||||
|
||||
lhs_offset_contribution0 *= lhs_offset;
|
||||
lhs_offset_contribution1 *= lhs_offset;
|
||||
if (bias)
|
||||
{
|
||||
lhs_offset_contribution0 += bias[rhs_rows_idx];
|
||||
lhs_offset_contribution1 += bias[rhs_rows_idx + 1];
|
||||
}
|
||||
|
||||
int32_t lhs_rows_idx = lhs_rows >> 1;
|
||||
|
||||
while (lhs_rows_idx)
|
||||
{
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
|
||||
q31_t res00 = lhs_offset_contribution0;
|
||||
q31_t res01 = lhs_offset_contribution1;
|
||||
q31_t res10 = lhs_offset_contribution0;
|
||||
q31_t res11 = lhs_offset_contribution1;
|
||||
|
||||
for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--)
|
||||
{
|
||||
q7_t rhs_value0 = rhs_ptr[0];
|
||||
q7_t rhs_value1 = rhs_ptr[rhs_cols];
|
||||
q7_t lhs_value = lhs_ptr[0];
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
res01 += lhs_value * rhs_value1;
|
||||
|
||||
lhs_value = lhs_ptr[rhs_cols];
|
||||
res10 += lhs_value * rhs_value0;
|
||||
res11 += lhs_value * rhs_value1;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]);
|
||||
res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]);
|
||||
res10 = arm_nn_requantize(res10, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]);
|
||||
res11 = arm_nn_requantize(res11, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
res01 += dst_offset;
|
||||
res10 += dst_offset;
|
||||
res11 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
res01 = MAX(res01, activation_min);
|
||||
res01 = MIN(res01, activation_max);
|
||||
res10 = MAX(res10, activation_min);
|
||||
res10 = MIN(res10, activation_max);
|
||||
res11 = MAX(res11, activation_min);
|
||||
res11 = MIN(res11, activation_max);
|
||||
|
||||
dst_ptr[0] = (q7_t)res00;
|
||||
dst_ptr[1] = (q7_t)res01;
|
||||
dst_ptr += rhs_rows;
|
||||
dst_ptr[0] = (q7_t)res10;
|
||||
dst_ptr[1] = (q7_t)res11;
|
||||
dst_ptr += rhs_rows;
|
||||
|
||||
lhs_ptr += rhs_cols;
|
||||
|
||||
lhs_rows_idx--;
|
||||
}
|
||||
|
||||
// Left-over rows
|
||||
if (lhs_rows % 2)
|
||||
{
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
|
||||
q31_t res00 = lhs_offset_contribution0;
|
||||
q31_t res01 = lhs_offset_contribution1;
|
||||
|
||||
for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--)
|
||||
{
|
||||
q7_t rhs_value0 = rhs_ptr[0];
|
||||
q7_t rhs_value1 = rhs_ptr[rhs_cols];
|
||||
q7_t lhs_value = lhs_ptr[0];
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
res01 += lhs_value * rhs_value1;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows_idx], dst_shifts[rhs_rows_idx]);
|
||||
res01 = arm_nn_requantize(res01, dst_multipliers[rhs_rows_idx + 1], dst_shifts[rhs_rows_idx + 1]);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
res01 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
res01 = MAX(res01, activation_min);
|
||||
res01 = MIN(res01, activation_max);
|
||||
|
||||
dst_ptr[0] = (q7_t)res00;
|
||||
dst_ptr[1] = (q7_t)res01;
|
||||
}
|
||||
|
||||
rhs += 2 * rhs_cols;
|
||||
dst += 2;
|
||||
}
|
||||
|
||||
if (rhs_rows % 2)
|
||||
{
|
||||
const q7_t *lhs_ptr = &lhs[0];
|
||||
q7_t *dst_ptr = &dst[0];
|
||||
|
||||
for (int32_t lhs_rows_idx = 0; lhs_rows_idx < lhs_rows; ++lhs_rows_idx)
|
||||
{
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
q31_t res00 = 0;
|
||||
if (bias)
|
||||
{
|
||||
res00 = bias[rhs_rows - 1];
|
||||
}
|
||||
|
||||
for (int32_t rhs_cols_idx = rhs_cols; rhs_cols_idx != 0; rhs_cols_idx--)
|
||||
{
|
||||
q31_t rhs_value = rhs_ptr[0];
|
||||
q31_t lhs_value = lhs_ptr[0] + lhs_offset;
|
||||
|
||||
res00 += lhs_value * rhs_value;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multipliers[rhs_rows - 1], dst_shifts[rhs_rows - 1]);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
|
||||
dst_ptr[0] = (q7_t)res00;
|
||||
dst_ptr += rhs_rows;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return ARM_MATH_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
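For the transposed-RHS matrix multiplication above, a hedged sketch of a call site. It is not part of the commit: the per-channel multiplier and shift values are placeholders standing in for real requantization parameters produced during quantization, and the wrapper name and data are purely illustrative.

    #include "arm_nnsupportfunctions.h"

    /* Multiply a 2x4 s8 LHS with a 3x4 s8 RHS (RHS stored transposed, one
       output channel per RHS row), producing a 2x3 s8 result. */
    void example_mat_mult_nt_t_s8(void)
    {
        static const q7_t lhs[2 * 4] = {1, 2, 3, 4, 5, 6, 7, 8};
        static const q7_t rhs[3 * 4] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0};
        static const q31_t bias[3] = {0, 0, 0};
        static const int32_t dst_mult[3] = {1073741824, 1073741824, 1073741824};  /* placeholder */
        static const int32_t dst_shift[3] = {1, 1, 1};                            /* placeholder */
        q7_t dst[2 * 3];

        (void)arm_nn_mat_mult_nt_t_s8(lhs, rhs, bias, dst, dst_mult, dst_shift,
                                      2,         /* lhs_rows                     */
                                      3,         /* rhs_rows, i.e. output channels */
                                      4,         /* rhs_cols, the common dimension */
                                      0,         /* lhs_offset                   */
                                      0,         /* dst_offset                   */
                                      -128, 127  /* activation_min / max         */);
    }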
73  Drivers/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c  Normal file

@@ -0,0 +1,73 @@
/*
 * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_nn_mult_q15.c
 * Description:  Q15 vector multiplication with variable output shifts
 *
 * $Date:        20. July 2021
 * $Revision:    V.1.1.2
 *
 * Target Processor: Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupSupport
 */

/**
 * @addtogroup NNBasicMath
 * @{
 */

/**
 * @brief           Q15 vector multiplication with variable output shifts
 * @param[in]       *pSrcA      pointer to the first input vector
 * @param[in]       *pSrcB      pointer to the second input vector
 * @param[out]      *pDst       pointer to the output vector
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in]       blockSize   number of samples in each vector
 *
 * <b>Scaling and Overflow Behavior:</b>
 * \par
 * The function uses saturating arithmetic.
 * Results outside of the allowable Q15 range [0x8000 0x7FFF] will be saturated.
 */

void arm_nn_mult_q15(q15_t *pSrcA, q15_t *pSrcB, q15_t *pDst, const uint16_t out_shift, uint32_t blockSize)
{
    uint32_t blkCnt = blockSize; /* loop counters */

    while (blkCnt > 0U)
    {
        /* C = A * B */
        /* Multiply the inputs and store the result in the destination buffer */
        *pDst++ = (q15_t)__SSAT(((q31_t)((q31_t)(*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 16);

        /* Decrement the blockSize loop counter */
        blkCnt--;
    }
}

/**
 * @} end of NNBasicMath group
 */
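A minimal usage sketch for arm_nn_mult_q15 (not part of the commit; names and sample data are illustrative, CMSIS-NN headers assumed on the include path). With out_shift set to 15 the call performs a standard rounded Q15 multiplication.

    #include "arm_nnsupportfunctions.h"

    /* Element-wise Q15 multiply with a right shift of 15, i.e. Q15 x Q15 -> Q15
       fixed-point multiplication with rounding. */
    void example_mult_q15(void)
    {
        q15_t a[4] = {16384, 16384, -16384, 32767};  /* 0.5, 0.5, -0.5, ~1.0 */
        q15_t b[4] = {16384, -32768, 16384, 32767};  /* 0.5, -1.0, 0.5, ~1.0 */
        q15_t c[4];

        arm_nn_mult_q15(a, b, c, 15, 4);
        /* c == {8192, -16384, -8192, 32766}, i.e. roughly
           {0.25, -0.5, -0.25, ~1.0} in Q15. */
    }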
73  Drivers/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c  Normal file

@@ -0,0 +1,73 @@
/*
 * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_nn_mult_q7.c
 * Description:  Q7 vector multiplication with variable output shifts
 *
 * $Date:        20. July 2021
 * $Revision:    V.1.1.2
 *
 * Target Processor: Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupSupport
 */

/**
 * @addtogroup NNBasicMath
 * @{
 */

/**
 * @brief           Q7 vector multiplication with variable output shifts
 * @param[in]       *pSrcA      pointer to the first input vector
 * @param[in]       *pSrcB      pointer to the second input vector
 * @param[out]      *pDst       pointer to the output vector
 * @param[in]       out_shift   amount of right-shift for output
 * @param[in]       blockSize   number of samples in each vector
 *
 * <b>Scaling and Overflow Behavior:</b>
 * \par
 * The function uses saturating arithmetic.
 * Results outside of the allowable Q7 range [0x80 0x7F] will be saturated.
 */

void arm_nn_mult_q7(q7_t *pSrcA, q7_t *pSrcB, q7_t *pDst, const uint16_t out_shift, uint32_t blockSize)
{
    uint32_t blkCnt = blockSize; /* loop counters */

    while (blkCnt > 0U)
    {
        /* C = A * B */
        /* Multiply the inputs and store the result in the destination buffer */
        *pDst++ = (q7_t)__SSAT(((q15_t)((q15_t)(*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8);

        /* Decrement the blockSize loop counter */
        blkCnt--;
    }
}

/**
 * @} end of NNBasicMath group
 */
|
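As a worked example of the arithmetic above (not part of the library), the sketch below models one element of the Q7 multiply with out_shift = 7, assuming NN_ROUND(shift) expands to 1 << (shift - 1); both inputs and the output are then treated as Q0.7.

#include <stdint.h>

static int8_t mult_q7_element(int8_t a, int8_t b)
{
    int32_t product = (int32_t)a * b;       /* Q0.14 intermediate */
    int32_t rounded = product + (1 << 6);   /* NN_ROUND(7) */
    int32_t shifted = rounded >> 7;         /* back to Q0.7 */
    if (shifted > 127)  { shifted = 127;  } /* __SSAT(..., 8) */
    if (shifted < -128) { shifted = -128; }
    return (int8_t)shifted;
}

/* Example: 0.5 * 0.5 -> mult_q7_element(0x40, 0x40) == 0x20 (0.25 in Q0.7). */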
@ -0,0 +1,211 @@
|
|||
/*
|
||||
* Copyright (C) 2020-2022 Arm Limited or its affiliates.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_vec_mat_mult_t_s16
|
||||
* Description: s16 vector by matrix (transposed) multiplication
|
||||
*
|
||||
* $Date: 04. January 2022
|
||||
* $Revision: V.1.2.0
|
||||
*
|
||||
* Target Processor: Cortex-M
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* s16 vector(lhs) by matrix (transposed) multiplication
|
||||
*
|
||||
* Refer to the header file for details.
|
||||
*
|
||||
*/
|
||||
arm_status arm_nn_vec_mat_mult_t_s16(const q15_t *lhs,
|
||||
const q7_t *rhs,
|
||||
const q63_t *bias,
|
||||
q15_t *dst,
|
||||
const int32_t dst_multiplier,
|
||||
const int32_t dst_shift,
|
||||
const int32_t rhs_cols,
|
||||
const int32_t rhs_rows,
|
||||
const int32_t activation_min,
|
||||
const int32_t activation_max)
|
||||
{
|
||||
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
|
||||
const int32_t row_loop_cnt = rhs_rows / 2;
|
||||
|
||||
int32_t rhs_cols_fast = rhs_cols;
|
||||
|
||||
if (rhs_cols > 512)
|
||||
{
|
||||
rhs_cols_fast = 512;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < row_loop_cnt; i++)
|
||||
{
|
||||
q63_t acc_64_0 = 0;
|
||||
q63_t acc_64_1 = 0;
|
||||
int32_t acc_0 = 0;
|
||||
int32_t acc_1 = 0;
|
||||
|
||||
const int32_t col_loop_cnt = rhs_cols_fast / 4;
|
||||
|
||||
const int16_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
const int8_t *rhs_1 = rhs + rhs_cols;
|
||||
rhs += 2 * rhs_cols;
|
||||
|
||||
for (int j = col_loop_cnt; j != 0; j--)
|
||||
{
|
||||
int32_t ker_0, ker_1, vec_part_0, vec_part_1;
|
||||
vec_part_0 = arm_nn_read_q15x2_ia(&lhs_vec);
|
||||
vec_part_1 = arm_nn_read_q15x2_ia(&lhs_vec);
|
||||
|
||||
rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1);
|
||||
|
||||
acc_0 = __SMLAD(ker_0, vec_part_0, acc_0);
|
||||
acc_0 = __SMLAD(ker_1, vec_part_1, acc_0);
|
||||
|
||||
rhs_1 = read_and_pad(rhs_1, &ker_0, &ker_1);
|
||||
|
||||
acc_1 = __SMLAD(ker_0, vec_part_0, acc_1);
|
||||
acc_1 = __SMLAD(ker_1, vec_part_1, acc_1);
|
||||
}
|
||||
|
||||
acc_64_0 += acc_0;
|
||||
acc_64_1 += acc_1;
|
||||
|
||||
for (int k = col_loop_cnt * 4; k < rhs_cols; k++)
|
||||
{
|
||||
const int32_t lhs_temp = (*lhs_vec);
|
||||
lhs_vec++;
|
||||
acc_64_0 += lhs_temp * (*rhs_0);
|
||||
rhs_0++;
|
||||
acc_64_1 += lhs_temp * (*rhs_1);
|
||||
rhs_1++;
|
||||
}
|
||||
|
||||
if (bias)
|
||||
{
|
||||
acc_64_0 += *bias++;
|
||||
acc_64_1 += *bias++;
|
||||
}
|
||||
q31_t tmp;
|
||||
tmp = arm_nn_requantize_s64(acc_64_0, dst_multiplier, dst_shift);
|
||||
tmp = MAX(tmp, activation_min);
|
||||
tmp = MIN(tmp, activation_max);
|
||||
*dst++ = (q15_t)tmp;
|
||||
|
||||
tmp = arm_nn_requantize_s64(acc_64_1, dst_multiplier, dst_shift);
|
||||
tmp = MAX(tmp, activation_min);
|
||||
tmp = MIN(tmp, activation_max);
|
||||
*dst++ = (q15_t)tmp;
|
||||
}
|
||||
|
||||
if (rhs_rows & 0x1)
|
||||
{
|
||||
q63_t acc_64_0 = 0;
|
||||
int32_t acc_0 = 0;
|
||||
const int32_t col_loop_cnt = rhs_cols_fast / 4;
|
||||
|
||||
const int16_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
|
||||
for (int i = col_loop_cnt; i != 0; i--)
|
||||
{
|
||||
int32_t ker_0, ker_1, vec;
|
||||
rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1);
|
||||
|
||||
vec = arm_nn_read_q15x2_ia(&lhs_vec);
|
||||
acc_0 = __SMLAD(ker_0, vec, acc_0);
|
||||
|
||||
vec = arm_nn_read_q15x2_ia(&lhs_vec);
|
||||
acc_0 = __SMLAD(ker_1, vec, acc_0);
|
||||
}
|
||||
|
||||
acc_64_0 += acc_0;
|
||||
|
||||
for (int j = col_loop_cnt * 4; j < rhs_cols; j++)
|
||||
{
|
||||
const int32_t lhs_temp = (*lhs_vec);
|
||||
lhs_vec++;
|
||||
acc_64_0 += lhs_temp * (*rhs_0);
|
||||
rhs_0++;
|
||||
}
|
||||
|
||||
if (bias)
|
||||
{
|
||||
acc_64_0 += *bias++;
|
||||
}
|
||||
q31_t tmp;
|
||||
tmp = arm_nn_requantize_s64(acc_64_0, dst_multiplier, dst_shift);
|
||||
tmp = MAX(tmp, activation_min);
|
||||
tmp = MIN(tmp, activation_max);
|
||||
*dst++ = (q15_t)tmp;
|
||||
}
|
||||
|
||||
#else
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < rhs_rows; i_row_loop_cnt++)
|
||||
{
|
||||
const q15_t *lhs_ptr = lhs;
|
||||
const q7_t *rhs_ptr_0 = &rhs[0];
|
||||
|
||||
q63_t result = 0;
|
||||
|
||||
if (bias)
|
||||
{
|
||||
result = *bias++;
|
||||
}
|
||||
for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
const q63_t rhs_value0 = (int8_t)*rhs_ptr_0;
|
||||
const q63_t lhs_value = *lhs_ptr;
|
||||
|
||||
result += lhs_value * rhs_value0;
|
||||
|
||||
++rhs_ptr_0;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
result = arm_nn_requantize_s64(result, dst_multiplier, dst_shift);
|
||||
|
||||
// Clamp the result
|
||||
result = ((result) > (activation_min) ? (result) : (activation_min));
|
||||
result = ((result) < (activation_max) ? (result) : (activation_max));
|
||||
|
||||
*dst++ = (q15_t)result;
|
||||
rhs += rhs_cols;
|
||||
}
|
||||
#endif
|
||||
|
||||
return ARM_MATH_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
|
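A call sketch for the s16 kernel above, not from the library itself: it assumes the prototype is available via arm_nnsupportfunctions.h, and the 2x4 weight matrix, bias values and requantization parameters (multiplier 2^30, roughly 0.5 in Q31, with shift 0) are made-up illustrations.

#include "arm_nnsupportfunctions.h"

void example_vec_mat_mult_t_s16(void)
{
    const q15_t lhs[4] = {100, -200, 300, -400};     /* input vector */
    const q7_t rhs[2 * 4] = {1, 2, 3, 4,             /* weight row 0 */
                             -1, -2, -3, -4};        /* weight row 1 */
    const q63_t bias[2] = {10, -10};
    q15_t dst[2];

    (void)arm_nn_vec_mat_mult_t_s16(lhs, rhs, bias, dst,
                                    1073741824 /* dst_multiplier */, 0 /* dst_shift */,
                                    4 /* rhs_cols */, 2 /* rhs_rows */,
                                    -32768, 32767 /* activation range */);
}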
@ -0,0 +1,402 @@
|
|||
/*
|
||||
* Copyright (C) 2020-2022 Arm Limited or its affiliates.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_vec_mat_mult_t_s8
|
||||
* Description: s8 vector by matrix (transposed) multiplication
|
||||
*
|
||||
* $Date: 28 April 2022
|
||||
* $Revision: V.3.0.1
|
||||
*
|
||||
* Target Processor: Cortex-M
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* s8 vector(lhs) by matrix (transposed) multiplication
|
||||
*
|
||||
* Refer to the header file for details.
|
||||
*
|
||||
*/
|
||||
arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs,
|
||||
const q7_t *rhs,
|
||||
const q31_t *bias,
|
||||
q7_t *dst,
|
||||
const int32_t lhs_offset,
|
||||
const int32_t rhs_offset,
|
||||
const int32_t dst_offset,
|
||||
const int32_t dst_multiplier,
|
||||
const int32_t dst_shift,
|
||||
const int32_t rhs_cols,
|
||||
const int32_t rhs_rows,
|
||||
const int32_t activation_min,
|
||||
const int32_t activation_max,
|
||||
const int32_t address_offset)
|
||||
{
|
||||
(void)rhs_offset;
|
||||
#if defined(ARM_MATH_MVEI)
|
||||
const int32_t row_loop_cnt = rhs_rows / 3;
|
||||
const uint32x4_t address_offset_array = {0, address_offset, address_offset * 2, address_offset * 3};
|
||||
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
int32_t acc_1 = 0;
|
||||
int32_t acc_2 = 0;
|
||||
|
||||
const int32_t col_loop_cnt = (rhs_cols + 15) / 16;
|
||||
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
const int8_t *rhs_1 = rhs + rhs_cols;
|
||||
const int8_t *rhs_2 = rhs + 2 * rhs_cols;
|
||||
|
||||
int32_t rhs_sum_0 = 0;
|
||||
int32_t rhs_sum_1 = 0;
|
||||
int32_t rhs_sum_2 = 0;
|
||||
|
||||
uint32_t col_cnt = (uint32_t)rhs_cols;
|
||||
|
||||
for (int i = 0; i < col_loop_cnt; i++)
|
||||
{
|
||||
mve_pred16_t p = vctp8q(col_cnt);
|
||||
col_cnt -= 16;
|
||||
|
||||
const int8x16_t input = vldrbq_z_s8(lhs_vec, p);
|
||||
|
||||
const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p);
|
||||
rhs_sum_0 = vaddvaq_p_s8(rhs_sum_0, ker_0, p);
|
||||
acc_0 = vmladavaq_p_s8(acc_0, ker_0, input, p);
|
||||
|
||||
const int8x16_t ker_1 = vldrbq_z_s8(rhs_1, p);
|
||||
rhs_sum_1 = vaddvaq_p_s8(rhs_sum_1, ker_1, p);
|
||||
acc_1 = vmladavaq_p_s8(acc_1, ker_1, input, p);
|
||||
|
||||
const int8x16_t ker_2 = vldrbq_z_s8(rhs_2, p);
|
||||
rhs_sum_2 = vaddvaq_p_s8(rhs_sum_2, ker_2, p);
|
||||
acc_2 = vmladavaq_p_s8(acc_2, ker_2, input, p);
|
||||
|
||||
lhs_vec += 16;
|
||||
rhs_0 += 16;
|
||||
rhs_1 += 16;
|
||||
rhs_2 += 16;
|
||||
}
|
||||
rhs += 3 * rhs_cols;
|
||||
|
||||
int32x4_t acc = {acc_0, acc_1, acc_2, 0};
|
||||
mve_pred16_t p = vctp32q(3);
|
||||
if (bias)
|
||||
{
|
||||
int32x4_t b = vldrwq_z_s32(bias, p);
|
||||
acc = vaddq_m_s32(vuninitializedq_s32(), acc, b, p);
|
||||
bias += 3;
|
||||
}
|
||||
const int32x4_t rhs_sum = {rhs_sum_0, rhs_sum_1, rhs_sum_2, 0};
|
||||
acc += vdupq_n_s32(lhs_offset) * rhs_sum;
|
||||
|
||||
acc = arm_requantize_mve(acc, dst_multiplier, dst_shift);
|
||||
acc = vaddq_s32(acc, vdupq_n_s32(dst_offset));
|
||||
acc = vmaxq_s32(acc, vdupq_n_s32(activation_min));
|
||||
acc = vminq_s32(acc, vdupq_n_s32(activation_max));
|
||||
|
||||
if (address_offset > 1L)
|
||||
{
|
||||
vstrbq_scatter_offset_s32(dst, address_offset_array, acc);
|
||||
}
|
||||
else
|
||||
{
|
||||
vstrbq_p_s32(dst, acc, p);
|
||||
}
|
||||
dst += 3 * address_offset;
|
||||
}
|
||||
|
||||
const int loop_cnt = rhs_rows % 3;
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
const int32_t col_loop_cnt = (rhs_cols + 15) / 16;
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
int32_t rhs_sum_0 = 0;
|
||||
uint32_t col_cnt = (uint32_t)rhs_cols;
|
||||
|
||||
for (int i = 0; i < col_loop_cnt; i++)
|
||||
{
|
||||
mve_pred16_t p = vctp8q(col_cnt);
|
||||
col_cnt -= 16;
|
||||
const int8x16_t input = vldrbq_z_s8(lhs_vec, p);
|
||||
|
||||
const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p);
|
||||
rhs_sum_0 = vaddvaq_p_s8(rhs_sum_0, ker_0, p);
|
||||
acc_0 = vmladavaq_p_s8(acc_0, ker_0, input, p);
|
||||
|
||||
lhs_vec += 16;
|
||||
rhs_0 += 16;
|
||||
}
|
||||
rhs += rhs_cols;
|
||||
|
||||
if (bias)
|
||||
{
|
||||
acc_0 += *bias;
|
||||
bias++;
|
||||
}
|
||||
const int32_t offsets = rhs_sum_0 * lhs_offset;
|
||||
acc_0 += offsets;
|
||||
acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift);
|
||||
acc_0 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
acc_0 = MAX(acc_0, activation_min);
|
||||
*dst = MIN(acc_0, activation_max);
|
||||
dst += address_offset;
|
||||
}
|
||||
|
||||
#elif defined(ARM_MATH_DSP)
|
||||
const int32_t row_loop_cnt = rhs_rows / 2;
|
||||
const int16_t lhs_offset_s16 = (int16_t)lhs_offset;
|
||||
const uint32_t lhs_offset_s16x2 = __PKHBT(lhs_offset_s16, lhs_offset_s16, 16);
|
||||
|
||||
for (int32_t i = 0; i < row_loop_cnt; i++)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
int32_t acc_1 = 0;
|
||||
if (bias)
|
||||
{
|
||||
acc_0 = *bias++;
|
||||
acc_1 = *bias++;
|
||||
}
|
||||
|
||||
const int32_t col_loop_cnt = rhs_cols / 4;
|
||||
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
const int8_t *rhs_1 = rhs + rhs_cols;
|
||||
rhs += 2 * rhs_cols;
|
||||
|
||||
for (int j = col_loop_cnt; j != 0; j--)
|
||||
{
|
||||
int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec);
|
||||
int32_t vec_1 = __SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8);
|
||||
|
||||
vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0);
|
||||
|
||||
int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0);
|
||||
int32_t ker_1 = __SXTB16_RORn((uint32_t)ker_0, 8);
|
||||
ker_0 = __SXTB16(ker_0);
|
||||
|
||||
acc_0 = __SMLAD(ker_1, vec_1, acc_0);
|
||||
acc_0 = __SMLAD(ker_0, vec_0, acc_0);
|
||||
|
||||
ker_0 = arm_nn_read_q7x4_ia(&rhs_1);
|
||||
ker_1 = __SXTB16_RORn((uint32_t)ker_0, 8);
|
||||
ker_0 = __SXTB16(ker_0);
|
||||
|
||||
acc_1 = __SMLAD(ker_1, vec_1, acc_1);
|
||||
acc_1 = __SMLAD(ker_0, vec_0, acc_1);
|
||||
}
|
||||
|
||||
for (int k = col_loop_cnt * 4; k < rhs_cols; k++)
|
||||
{
|
||||
const int32_t lhs_temp = (*lhs_vec + lhs_offset);
|
||||
lhs_vec++;
|
||||
acc_0 += lhs_temp * (*rhs_0);
|
||||
rhs_0++;
|
||||
acc_1 += lhs_temp * (*rhs_1);
|
||||
rhs_1++;
|
||||
}
|
||||
|
||||
acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift);
|
||||
acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift);
|
||||
|
||||
// Add offset
|
||||
acc_0 += dst_offset;
|
||||
acc_1 += dst_offset;
|
||||
// Clamp the result
|
||||
acc_0 = MAX(acc_0, activation_min);
|
||||
acc_0 = MIN(acc_0, activation_max);
|
||||
acc_1 = MAX(acc_1, activation_min);
|
||||
acc_1 = MIN(acc_1, activation_max);
|
||||
*dst = (int8_t)acc_0;
|
||||
*(dst + address_offset) = (int8_t)acc_1;
|
||||
dst += 2 * address_offset;
|
||||
}
|
||||
|
||||
if (rhs_rows & 0x1)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
if (bias)
|
||||
{
|
||||
acc_0 = *bias++;
|
||||
}
|
||||
const int32_t col_loop_cnt = rhs_cols / 4;
|
||||
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
|
||||
for (int i = col_loop_cnt; i != 0; i--)
|
||||
{
|
||||
int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec);
|
||||
int32_t vec_1 = __SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8);
|
||||
vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0);
|
||||
|
||||
int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0);
|
||||
int32_t ker_1 = __SXTB16_RORn((uint32_t)ker_0, 8);
|
||||
ker_0 = __SXTB16(ker_0);
|
||||
|
||||
acc_0 = __SMLAD(ker_1, vec_1, acc_0);
|
||||
acc_0 = __SMLAD(ker_0, vec_0, acc_0);
|
||||
}
|
||||
|
||||
for (int j = col_loop_cnt * 4; j < rhs_cols; j++)
|
||||
{
|
||||
const int32_t lhs_temp = (*lhs_vec + lhs_offset);
|
||||
lhs_vec++;
|
||||
acc_0 += lhs_temp * (*rhs_0);
|
||||
rhs_0++;
|
||||
}
|
||||
|
||||
acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift);
|
||||
|
||||
// Add offset
|
||||
acc_0 += dst_offset;
|
||||
// Clamp the result
|
||||
acc_0 = MAX(acc_0, activation_min);
|
||||
acc_0 = MIN(acc_0, activation_max);
|
||||
*dst = (int8_t)acc_0;
|
||||
dst += address_offset;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
const int32_t row_loop_cnt = rhs_rows / 3;
|
||||
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++)
|
||||
{
|
||||
const q7_t *lhs_ptr = lhs;
|
||||
const q7_t *rhs_ptr_0 = &rhs[0];
|
||||
const q7_t *rhs_ptr_1 = &rhs[rhs_cols];
|
||||
const q7_t *rhs_ptr_2 = &rhs[rhs_cols * 2];
|
||||
|
||||
q31_t res00 = 0;
|
||||
q31_t res01 = 0;
|
||||
q31_t res02 = 0;
|
||||
if (bias)
|
||||
{
|
||||
res00 = *bias++;
|
||||
res01 = *bias++;
|
||||
res02 = *bias++;
|
||||
}
|
||||
for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
const q31_t rhs_value0 = (int8_t)*rhs_ptr_0;
|
||||
const q31_t rhs_value1 = (int8_t)*rhs_ptr_1;
|
||||
const q31_t rhs_value2 = (int8_t)*rhs_ptr_2;
|
||||
const q31_t lhs_value = (int8_t)*lhs_ptr + lhs_offset;
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
res01 += lhs_value * rhs_value1;
|
||||
res02 += lhs_value * rhs_value2;
|
||||
|
||||
++rhs_ptr_0;
|
||||
++rhs_ptr_1;
|
||||
++rhs_ptr_2;
|
||||
++lhs_ptr;
|
||||
}
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift);
|
||||
res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift);
|
||||
res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
res01 += dst_offset;
|
||||
res02 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
res01 = MAX(res01, activation_min);
|
||||
res01 = MIN(res01, activation_max);
|
||||
res02 = MAX(res02, activation_min);
|
||||
res02 = MIN(res02, activation_max);
|
||||
|
||||
*dst = (q7_t)res00;
|
||||
*(dst + address_offset) = (q7_t)res01;
|
||||
*(dst + 2 * address_offset) = (q7_t)res02;
|
||||
dst += 3 * address_offset;
|
||||
|
||||
rhs += 3 * rhs_cols;
|
||||
}
|
||||
|
||||
const int loop_cnt = rhs_rows % 3;
|
||||
|
||||
for (int i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++)
|
||||
{
|
||||
const q7_t *lhs_ptr = &lhs[0];
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
|
||||
q31_t res00 = 0;
|
||||
if (bias)
|
||||
{
|
||||
res00 = *bias++;
|
||||
}
|
||||
|
||||
for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
q31_t rhs_value0 = (int8_t)rhs_ptr[0];
|
||||
q31_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset;
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift);
|
||||
|
||||
// Add offset
|
||||
res00 += dst_offset;
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
|
||||
*dst = (int8_t)res00;
|
||||
dst += address_offset;
|
||||
rhs += rhs_cols;
|
||||
}
|
||||
#endif
|
||||
return ARM_MATH_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
|
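A call sketch for the s8 kernel, again illustrative rather than taken from the library: the point of interest is address_offset, which strides the output writes (1 gives a contiguous q7 output, larger values interleave results into a wider buffer). The offsets and quantization parameters below are assumptions.

#include "arm_nnsupportfunctions.h"

void example_vec_mat_mult_t_s8(void)
{
    const q7_t lhs[4] = {1, 2, 3, 4};
    const q7_t rhs[2 * 4] = {1, 1, 1, 1,
                             2, 2, 2, 2};
    const q31_t bias[2] = {0, 0};
    q7_t dst[2];

    (void)arm_nn_vec_mat_mult_t_s8(lhs, rhs, bias, dst,
                                   0 /* lhs_offset */, 0 /* rhs_offset, unused */,
                                   0 /* dst_offset */,
                                   1073741824 /* dst_multiplier */, 0 /* dst_shift */,
                                   4 /* rhs_cols */, 2 /* rhs_rows */,
                                   -128, 127 /* activation range */,
                                   1 /* address_offset: contiguous output */);
}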
@ -0,0 +1,341 @@
|
|||
/*
|
||||
* Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nn_vec_mat_mult_t_svdf_s8
|
||||
* Description: s8 vector by matrix (transposed) multiplication with
|
||||
* s16 output. Targeted at the SVDF operator.
|
||||
*
|
||||
* $Date: 15. April 2021
|
||||
* $Revision: V.1.0.0
|
||||
*
|
||||
* Target Processor: Cortex-M
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNBasicMath
|
||||
* @{
|
||||
*/
|
||||
|
||||
/*
|
||||
* s8 vector(lhs) by matrix (transposed) multiplication
|
||||
*
|
||||
* Refer to the header file for details.
|
||||
*
|
||||
*/
|
||||
arm_status arm_nn_vec_mat_mult_t_svdf_s8(const q7_t *lhs,
|
||||
const q7_t *rhs,
|
||||
q15_t *dst,
|
||||
const int32_t lhs_offset,
|
||||
const int32_t rhs_offset,
|
||||
const int32_t dst_offset,
|
||||
const int32_t dst_multiplier,
|
||||
const int32_t dst_shift,
|
||||
const int32_t rhs_cols,
|
||||
const int32_t rhs_rows,
|
||||
const int32_t activation_min,
|
||||
const int32_t activation_max)
|
||||
{
|
||||
(void)rhs_offset;
|
||||
if (rhs_cols < 0 || (NN_Q31_MAX - rhs_cols) < 16 || dst_offset < 0)
|
||||
{
|
||||
return ARM_MATH_ARGUMENT_ERROR;
|
||||
}
|
||||
|
||||
|
||||
#if defined(ARM_MATH_MVEI)
|
||||
int32_t row_loop_cnt = rhs_rows / 3;
|
||||
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
int32_t acc_1 = 0;
|
||||
int32_t acc_2 = 0;
|
||||
|
||||
const int32_t col_loop_cnt = (rhs_cols + 15) / 16;
|
||||
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
const int8_t *rhs_1 = rhs + rhs_cols;
|
||||
const int8_t *rhs_2 = rhs + 2 * rhs_cols;
|
||||
|
||||
int32_t rhs_sum_0 = 0;
|
||||
int32_t rhs_sum_1 = 0;
|
||||
int32_t rhs_sum_2 = 0;
|
||||
|
||||
uint32_t col_cnt = (uint32_t)rhs_cols;
|
||||
|
||||
for (int i = 0; i < col_loop_cnt; i++)
|
||||
{
|
||||
mve_pred16_t p = vctp8q(col_cnt);
|
||||
col_cnt -= 16;
|
||||
|
||||
const int8x16_t input = vldrbq_z_s8(lhs_vec, p);
|
||||
|
||||
const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p);
|
||||
rhs_sum_0 = vaddvaq_p_s8(rhs_sum_0, ker_0, p);
|
||||
acc_0 = vmladavaq_p_s8(acc_0, ker_0, input, p);
|
||||
|
||||
const int8x16_t ker_1 = vldrbq_z_s8(rhs_1, p);
|
||||
rhs_sum_1 = vaddvaq_p_s8(rhs_sum_1, ker_1, p);
|
||||
acc_1 = vmladavaq_p_s8(acc_1, ker_1, input, p);
|
||||
|
||||
const int8x16_t ker_2 = vldrbq_z_s8(rhs_2, p);
|
||||
rhs_sum_2 = vaddvaq_p_s8(rhs_sum_2, ker_2, p);
|
||||
acc_2 = vmladavaq_p_s8(acc_2, ker_2, input, p);
|
||||
|
||||
lhs_vec += 16;
|
||||
rhs_0 += 16;
|
||||
rhs_1 += 16;
|
||||
rhs_2 += 16;
|
||||
}
|
||||
rhs += 3 * rhs_cols;
|
||||
|
||||
int32x4_t acc = {acc_0, acc_1, acc_2, 0};
|
||||
const int32x4_t rhs_sum = {rhs_sum_0, rhs_sum_1, rhs_sum_2, 0};
|
||||
acc += vdupq_n_s32(lhs_offset) * rhs_sum;
|
||||
|
||||
acc = arm_requantize_mve(acc, dst_multiplier, dst_shift);
|
||||
acc = vmaxq_s32(acc, vdupq_n_s32(activation_min));
|
||||
acc = vminq_s32(acc, vdupq_n_s32(activation_max));
|
||||
*(dst) = (int16_t)acc[0];
|
||||
*(dst + dst_offset) = (int16_t)acc[1];
|
||||
*(dst + 2 * dst_offset) = (int16_t)acc[2];
|
||||
dst += 3 * dst_offset;
|
||||
}
|
||||
|
||||
const int loop_cnt = rhs_rows % 3;
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
const int32_t col_loop_cnt = (rhs_cols + 15) / 16;
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
int32_t rhs_sum_0 = 0;
|
||||
uint32_t col_cnt = (uint32_t)rhs_cols;
|
||||
|
||||
for (int i = 0; i < col_loop_cnt; i++)
|
||||
{
|
||||
mve_pred16_t p = vctp8q(col_cnt);
|
||||
col_cnt -= 16;
|
||||
const int8x16_t input = vldrbq_z_s8(lhs_vec, p);
|
||||
|
||||
const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p);
|
||||
rhs_sum_0 = vaddvaq_p_s8(rhs_sum_0, ker_0, p);
|
||||
acc_0 = vmladavaq_p_s8(acc_0, ker_0, input, p);
|
||||
|
||||
lhs_vec += 16;
|
||||
rhs_0 += 16;
|
||||
}
|
||||
rhs += rhs_cols;
|
||||
|
||||
const int32_t offsets = rhs_sum_0 * lhs_offset;
|
||||
acc_0 = __QADD(acc_0, offsets);
|
||||
acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift);
|
||||
|
||||
// Clamp the result
|
||||
acc_0 = MAX(acc_0, activation_min);
|
||||
*dst = (q15_t)MIN(acc_0, activation_max);
|
||||
dst += dst_offset;
|
||||
}
|
||||
|
||||
#elif defined(ARM_MATH_DSP)
|
||||
int32_t row_loop_cnt = rhs_rows / 2;
|
||||
|
||||
const int16_t lhs_offset_s16 = lhs_offset;
|
||||
const int16_t rhs_offset_s16 = rhs_offset;
|
||||
|
||||
const uint32_t lhs_offset_s16x2 = __PKHBT(lhs_offset_s16, lhs_offset_s16, 16);
|
||||
const uint32_t rhs_offset_s16x2 = __PKHBT(rhs_offset_s16, rhs_offset_s16, 16);
|
||||
for (int32_t i = 0; i < row_loop_cnt; i++)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
int32_t acc_1 = 0;
|
||||
|
||||
const int32_t col_loop_cnt = rhs_cols / 4;
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
const int8_t *rhs_1 = rhs + rhs_cols;
|
||||
rhs += 2 * rhs_cols;
|
||||
for (int j = col_loop_cnt; j != 0; j--)
|
||||
{
|
||||
int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec);
|
||||
int32_t vec_1 = __SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8);
|
||||
vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0);
|
||||
int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0);
|
||||
int32_t ker_1 = __SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8);
|
||||
ker_0 = __SXTAB16(rhs_offset_s16x2, ker_0);
|
||||
acc_0 = __SMLAD(ker_1, vec_1, acc_0);
|
||||
acc_0 = __SMLAD(ker_0, vec_0, acc_0);
|
||||
ker_0 = arm_nn_read_q7x4_ia(&rhs_1);
|
||||
ker_1 = __SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8);
|
||||
ker_0 = __SXTAB16(rhs_offset_s16x2, ker_0);
|
||||
acc_1 = __SMLAD(ker_1, vec_1, acc_1);
|
||||
acc_1 = __SMLAD(ker_0, vec_0, acc_1);
|
||||
}
|
||||
for (int k = col_loop_cnt * 4; k < rhs_cols; k++)
|
||||
{
|
||||
const int32_t lhs_temp = (*lhs_vec + lhs_offset);
|
||||
lhs_vec++;
|
||||
acc_0 += lhs_temp * (*rhs_0 + rhs_offset);
|
||||
rhs_0++;
|
||||
acc_1 += lhs_temp * (*rhs_1 + rhs_offset);
|
||||
rhs_1++;
|
||||
}
|
||||
acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift);
|
||||
acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift);
|
||||
|
||||
// Clamp the result
|
||||
acc_0 = MAX(acc_0, activation_min);
|
||||
acc_0 = MIN(acc_0, activation_max);
|
||||
acc_1 = MAX(acc_1, activation_min);
|
||||
acc_1 = MIN(acc_1, activation_max);
|
||||
*dst = (q15_t)acc_0;
|
||||
*(dst + dst_offset) = (q15_t)acc_1;
|
||||
dst += 2 * dst_offset;
|
||||
}
|
||||
if (rhs_rows & 0x1)
|
||||
{
|
||||
int32_t acc_0 = 0;
|
||||
const int32_t col_loop_cnt = rhs_cols / 4;
|
||||
const int8_t *lhs_vec = lhs;
|
||||
const int8_t *rhs_0 = rhs;
|
||||
for (int i = col_loop_cnt; i != 0; i--)
|
||||
{
|
||||
int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec);
|
||||
int32_t vec_1 = __SXTAB16(lhs_offset_s16x2, __ROR((uint32_t)vec_0, 8));
|
||||
vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0);
|
||||
int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0);
|
||||
int32_t ker_1 = __SXTAB16(rhs_offset_s16x2, __ROR((uint32_t)ker_0, 8));
|
||||
ker_0 = __SXTAB16(rhs_offset_s16x2, ker_0);
|
||||
acc_0 = __SMLAD(ker_1, vec_1, acc_0);
|
||||
acc_0 = __SMLAD(ker_0, vec_0, acc_0);
|
||||
}
|
||||
for (int j = col_loop_cnt * 4; j < rhs_cols; j++)
|
||||
{
|
||||
const int32_t lhs_temp = (*lhs_vec + lhs_offset);
|
||||
lhs_vec++;
|
||||
acc_0 += lhs_temp * (*rhs_0 + rhs_offset);
|
||||
rhs_0++;
|
||||
}
|
||||
acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift);
|
||||
|
||||
// Clamp the result
|
||||
acc_0 = MAX(acc_0, activation_min);
|
||||
acc_0 = MIN(acc_0, activation_max);
|
||||
*dst = (q15_t)acc_0;
|
||||
dst += dst_offset;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
int32_t row_loop_cnt = rhs_rows / 3;
|
||||
|
||||
for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++)
|
||||
{
|
||||
const q7_t *lhs_ptr = lhs;
|
||||
const q7_t *rhs_ptr_0 = &rhs[0];
|
||||
const q7_t *rhs_ptr_1 = &rhs[rhs_cols];
|
||||
const q7_t *rhs_ptr_2 = &rhs[rhs_cols * 2];
|
||||
|
||||
q31_t res00 = 0;
|
||||
q31_t res01 = 0;
|
||||
q31_t res02 = 0;
|
||||
for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
const q31_t rhs_value0 = (int8_t)*rhs_ptr_0;
|
||||
const q31_t rhs_value1 = (int8_t)*rhs_ptr_1;
|
||||
const q31_t rhs_value2 = (int8_t)*rhs_ptr_2;
|
||||
const q31_t lhs_value = (int8_t)*lhs_ptr + lhs_offset;
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
res01 += lhs_value * rhs_value1;
|
||||
res02 += lhs_value * rhs_value2;
|
||||
|
||||
++rhs_ptr_0;
|
||||
++rhs_ptr_1;
|
||||
++rhs_ptr_2;
|
||||
++lhs_ptr;
|
||||
}
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift);
|
||||
res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift);
|
||||
res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift);
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
res01 = MAX(res01, activation_min);
|
||||
res01 = MIN(res01, activation_max);
|
||||
res02 = MAX(res02, activation_min);
|
||||
res02 = MIN(res02, activation_max);
|
||||
|
||||
*dst = (q15_t)res00;
|
||||
*(dst + dst_offset) = (q15_t)res01;
|
||||
*(dst + 2 * dst_offset) = (q15_t)res02;
|
||||
dst += 3 * dst_offset;
|
||||
rhs += 3 * rhs_cols;
|
||||
}
|
||||
|
||||
const int loop_cnt = rhs_rows % 3;
|
||||
|
||||
for (int i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++)
|
||||
{
|
||||
const q7_t *lhs_ptr = &lhs[0];
|
||||
const q7_t *rhs_ptr = &rhs[0];
|
||||
|
||||
q31_t res00 = 0;
|
||||
|
||||
for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx)
|
||||
{
|
||||
q31_t rhs_value0 = (int8_t)rhs_ptr[0] + rhs_offset;
|
||||
q31_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset;
|
||||
|
||||
res00 += lhs_value * rhs_value0;
|
||||
|
||||
++rhs_ptr;
|
||||
++lhs_ptr;
|
||||
}
|
||||
|
||||
// Quantize down
|
||||
res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift);
|
||||
|
||||
// Clamp the result
|
||||
res00 = MAX(res00, activation_min);
|
||||
res00 = MIN(res00, activation_max);
|
||||
|
||||
*dst = (q15_t)res00;
|
||||
dst += dst_offset;
|
||||
rhs += rhs_cols;
|
||||
}
|
||||
#endif
|
||||
|
||||
return ARM_MATH_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNBasicMath group
|
||||
*/
|
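A call sketch for the SVDF variant, with made-up values: here dst_offset acts as the stride between the two s16 results, which is how the SVDF operator scatters per-rank outputs into its state buffer; everything else mirrors the s8 kernel above.

#include "arm_nnsupportfunctions.h"

void example_vec_mat_mult_t_svdf_s8(void)
{
    const q7_t lhs[4] = {1, -1, 2, -2};
    const q7_t rhs[2 * 4] = {1, 2, 3, 4,
                             4, 3, 2, 1};
    q15_t state[2 * 3]; /* results land at state[0] and state[3] */

    (void)arm_nn_vec_mat_mult_t_svdf_s8(lhs, rhs, &state[0],
                                        0 /* lhs_offset */, 0 /* rhs_offset, unused */,
                                        3 /* dst_offset: output stride */,
                                        1073741824 /* dst_multiplier */, 0 /* dst_shift */,
                                        4 /* rhs_cols */, 2 /* rhs_rows */,
                                        -32768, 32767 /* activation range */);
}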
203
Drivers/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c
Normal file
|
@ -0,0 +1,203 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_nntables.c
|
||||
* Description: Common tables used by the CMSIS NN activation functions
|
||||
*
|
||||
* $Date: 17. January 2018
|
||||
* $Revision: V.1.0.0
|
||||
*
|
||||
* Target Processor: Cortex-M cores
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @brief tables for various activation functions
|
||||
*
|
||||
* This file contains the definitions of the common tables.
|
||||
* Most of them are used for activation functions
|
||||
*
|
||||
* Assumption:
|
||||
* Unified table: input is 3.x format, i.e., range of [-8, 8)
|
||||
* sigmoid(8) = 0.9996646498695336
|
||||
* tanh(8) = 0.9999997749296758
|
||||
* The accuracy here should be good enough
|
||||
*
|
||||
* 2-stage HL table:
|
||||
*
|
||||
* The entire input range is divided into two parts:
|
||||
*
|
||||
* Low range table: 0x000x xxxx or 0x111x xxxx
|
||||
* table entry will be the binary number excluding the first
|
||||
* two digits, i.e., 0x0x xxxx or 0x1x xxxx
|
||||
*
|
||||
*
|
||||
*
|
||||
* High range table 0x0010 0000 -- 0x0111 1111
|
||||
* 0x1000 0000 -- 0x1101 1111
|
||||
*
|
||||
* For positive numbers, table entry will be
|
||||
* 0x0010 0000 -- 0x0111 1111 minus 0x0010 0000
|
||||
* i.e., 0x0000 0000 - 0x0101 1111
|
||||
*
|
||||
* same thing for the negative numbers, table entry will be
|
||||
* 0x1000 0000 -- 0x1101 1111 minus 0x0010 0000
|
||||
* i.e., 0x0110 0000 - 0x1011 1111
|
||||
*/
|
||||
|
||||
const q7_t sigmoidTable_q7[256] = {
|
||||
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x53, 0x55, 0x57, 0x59, 0x5a, 0x5c, 0x5e, 0x5f, 0x61,
|
||||
0x62, 0x63, 0x65, 0x66, 0x67, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x72, 0x73, 0x74, 0x74,
|
||||
0x75, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c,
|
||||
0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
|
||||
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x06, 0x06,
|
||||
0x06, 0x07, 0x07, 0x08, 0x08, 0x09, 0x09, 0x0a, 0x0a, 0x0b, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x0f, 0x10, 0x11, 0x12,
|
||||
0x13, 0x14, 0x15, 0x16, 0x17, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x21, 0x22, 0x24, 0x26, 0x27, 0x29, 0x2b, 0x2d,
|
||||
0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
|
||||
};
|
||||
|
||||
const q15_t sigmoidTable_q15[256] = {
|
||||
0x4000, 0x4200, 0x43ff, 0x45fc, 0x47f5, 0x49eb, 0x4bdc, 0x4dc8, 0x4fad, 0x518a, 0x5360, 0x552c, 0x56ef, 0x58a8,
|
||||
0x5a57, 0x5bfb, 0x5d93, 0x5f20, 0x60a1, 0x6216, 0x637f, 0x64db, 0x662b, 0x676f, 0x68a6, 0x69d2, 0x6af1, 0x6c05,
|
||||
0x6d0d, 0x6e09, 0x6efb, 0x6fe2, 0x70be, 0x7190, 0x7258, 0x7316, 0x73cc, 0x7478, 0x751b, 0x75b7, 0x764a, 0x76d6,
|
||||
0x775b, 0x77d8, 0x784f, 0x78c0, 0x792a, 0x798f, 0x79ee, 0x7a48, 0x7a9d, 0x7aed, 0x7b39, 0x7b80, 0x7bc4, 0x7c03,
|
||||
0x7c3f, 0x7c78, 0x7cad, 0x7ce0, 0x7d0f, 0x7d3c, 0x7d66, 0x7d8d, 0x7db3, 0x7dd6, 0x7df7, 0x7e16, 0x7e33, 0x7e4f,
|
||||
0x7e69, 0x7e81, 0x7e98, 0x7eae, 0x7ec2, 0x7ed5, 0x7ee7, 0x7ef8, 0x7f08, 0x7f17, 0x7f25, 0x7f32, 0x7f3e, 0x7f4a,
|
||||
0x7f55, 0x7f5f, 0x7f69, 0x7f72, 0x7f7b, 0x7f83, 0x7f8a, 0x7f91, 0x7f98, 0x7f9e, 0x7fa4, 0x7faa, 0x7faf, 0x7fb4,
|
||||
0x7fb8, 0x7fbd, 0x7fc1, 0x7fc5, 0x7fc8, 0x7fcc, 0x7fcf, 0x7fd2, 0x7fd5, 0x7fd7, 0x7fda, 0x7fdc, 0x7fde, 0x7fe0,
|
||||
0x7fe2, 0x7fe4, 0x7fe6, 0x7fe7, 0x7fe9, 0x7fea, 0x7feb, 0x7fed, 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3,
|
||||
0x7ff4, 0x7ff4, 0x000b, 0x000c, 0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, 0x0012, 0x0013, 0x0015, 0x0016,
|
||||
0x0017, 0x0019, 0x001a, 0x001c, 0x001e, 0x0020, 0x0022, 0x0024, 0x0026, 0x0029, 0x002b, 0x002e, 0x0031, 0x0034,
|
||||
0x0038, 0x003b, 0x003f, 0x0043, 0x0048, 0x004c, 0x0051, 0x0056, 0x005c, 0x0062, 0x0068, 0x006f, 0x0076, 0x007d,
|
||||
0x0085, 0x008e, 0x0097, 0x00a1, 0x00ab, 0x00b6, 0x00c2, 0x00ce, 0x00db, 0x00e9, 0x00f8, 0x0108, 0x0119, 0x012b,
|
||||
0x013e, 0x0152, 0x0168, 0x017f, 0x0197, 0x01b1, 0x01cd, 0x01ea, 0x0209, 0x022a, 0x024d, 0x0273, 0x029a, 0x02c4,
|
||||
0x02f1, 0x0320, 0x0353, 0x0388, 0x03c1, 0x03fd, 0x043c, 0x0480, 0x04c7, 0x0513, 0x0563, 0x05b8, 0x0612, 0x0671,
|
||||
0x06d6, 0x0740, 0x07b1, 0x0828, 0x08a5, 0x092a, 0x09b6, 0x0a49, 0x0ae5, 0x0b88, 0x0c34, 0x0cea, 0x0da8, 0x0e70,
|
||||
0x0f42, 0x101e, 0x1105, 0x11f7, 0x12f3, 0x13fb, 0x150f, 0x162e, 0x175a, 0x1891, 0x19d5, 0x1b25, 0x1c81, 0x1dea,
|
||||
0x1f5f, 0x20e0, 0x226d, 0x2405, 0x25a9, 0x2758, 0x2911, 0x2ad4, 0x2ca0, 0x2e76, 0x3053, 0x3238, 0x3424, 0x3615,
|
||||
0x380b, 0x3a04, 0x3c01, 0x3e00,
|
||||
};
|
||||
|
||||
const q15_t sigmoidLTable_q15[128] = {
|
||||
0x4000, 0x4100, 0x4200, 0x42ff, 0x43ff, 0x44fd, 0x45fc, 0x46f9, 0x47f5, 0x48f1, 0x49eb, 0x4ae5, 0x4bdc,
|
||||
0x4cd3, 0x4dc8, 0x4ebb, 0x4fad, 0x509c, 0x518a, 0x5276, 0x5360, 0x5447, 0x552c, 0x560f, 0x56ef, 0x57cd,
|
||||
0x58a8, 0x5981, 0x5a57, 0x5b2a, 0x5bfb, 0x5cc9, 0x5d93, 0x5e5b, 0x5f20, 0x5fe2, 0x60a1, 0x615d, 0x6216,
|
||||
0x62cc, 0x637f, 0x642e, 0x64db, 0x6584, 0x662b, 0x66ce, 0x676f, 0x680c, 0x68a6, 0x693d, 0x69d2, 0x6a63,
|
||||
0x6af1, 0x6b7c, 0x6c05, 0x6c8a, 0x6d0d, 0x6d8d, 0x6e09, 0x6e84, 0x6efb, 0x6f70, 0x6fe2, 0x7051, 0x0f42,
|
||||
0x0faf, 0x101e, 0x1090, 0x1105, 0x117c, 0x11f7, 0x1273, 0x12f3, 0x1376, 0x13fb, 0x1484, 0x150f, 0x159d,
|
||||
0x162e, 0x16c3, 0x175a, 0x17f4, 0x1891, 0x1932, 0x19d5, 0x1a7c, 0x1b25, 0x1bd2, 0x1c81, 0x1d34, 0x1dea,
|
||||
0x1ea3, 0x1f5f, 0x201e, 0x20e0, 0x21a5, 0x226d, 0x2337, 0x2405, 0x24d6, 0x25a9, 0x267f, 0x2758, 0x2833,
|
||||
0x2911, 0x29f1, 0x2ad4, 0x2bb9, 0x2ca0, 0x2d8a, 0x2e76, 0x2f64, 0x3053, 0x3145, 0x3238, 0x332d, 0x3424,
|
||||
0x351b, 0x3615, 0x370f, 0x380b, 0x3907, 0x3a04, 0x3b03, 0x3c01, 0x3d01, 0x3e00, 0x3f00,
|
||||
};
|
||||
|
||||
const q15_t sigmoidHTable_q15[192] = {
|
||||
0x70be, 0x7190, 0x7258, 0x7316, 0x73cc, 0x7478, 0x751b, 0x75b7, 0x764a, 0x76d6, 0x775b, 0x77d8, 0x784f, 0x78c0,
|
||||
0x792a, 0x798f, 0x79ee, 0x7a48, 0x7a9d, 0x7aed, 0x7b39, 0x7b80, 0x7bc4, 0x7c03, 0x7c3f, 0x7c78, 0x7cad, 0x7ce0,
|
||||
0x7d0f, 0x7d3c, 0x7d66, 0x7d8d, 0x7db3, 0x7dd6, 0x7df7, 0x7e16, 0x7e33, 0x7e4f, 0x7e69, 0x7e81, 0x7e98, 0x7eae,
|
||||
0x7ec2, 0x7ed5, 0x7ee7, 0x7ef8, 0x7f08, 0x7f17, 0x7f25, 0x7f32, 0x7f3e, 0x7f4a, 0x7f55, 0x7f5f, 0x7f69, 0x7f72,
|
||||
0x7f7b, 0x7f83, 0x7f8a, 0x7f91, 0x7f98, 0x7f9e, 0x7fa4, 0x7faa, 0x7faf, 0x7fb4, 0x7fb8, 0x7fbd, 0x7fc1, 0x7fc5,
|
||||
0x7fc8, 0x7fcc, 0x7fcf, 0x7fd2, 0x7fd5, 0x7fd7, 0x7fda, 0x7fdc, 0x7fde, 0x7fe0, 0x7fe2, 0x7fe4, 0x7fe6, 0x7fe7,
|
||||
0x7fe9, 0x7fea, 0x7feb, 0x7fed, 0x7fee, 0x7fef, 0x7ff0, 0x7ff1, 0x7ff2, 0x7ff3, 0x7ff4, 0x7ff4, 0x000b, 0x000c,
|
||||
0x000c, 0x000d, 0x000e, 0x000f, 0x0010, 0x0011, 0x0012, 0x0013, 0x0015, 0x0016, 0x0017, 0x0019, 0x001a, 0x001c,
|
||||
0x001e, 0x0020, 0x0022, 0x0024, 0x0026, 0x0029, 0x002b, 0x002e, 0x0031, 0x0034, 0x0038, 0x003b, 0x003f, 0x0043,
|
||||
0x0048, 0x004c, 0x0051, 0x0056, 0x005c, 0x0062, 0x0068, 0x006f, 0x0076, 0x007d, 0x0085, 0x008e, 0x0097, 0x00a1,
|
||||
0x00ab, 0x00b6, 0x00c2, 0x00ce, 0x00db, 0x00e9, 0x00f8, 0x0108, 0x0119, 0x012b, 0x013e, 0x0152, 0x0168, 0x017f,
|
||||
0x0197, 0x01b1, 0x01cd, 0x01ea, 0x0209, 0x022a, 0x024d, 0x0273, 0x029a, 0x02c4, 0x02f1, 0x0320, 0x0353, 0x0388,
|
||||
0x03c1, 0x03fd, 0x043c, 0x0480, 0x04c7, 0x0513, 0x0563, 0x05b8, 0x0612, 0x0671, 0x06d6, 0x0740, 0x07b1, 0x0828,
|
||||
0x08a5, 0x092a, 0x09b6, 0x0a49, 0x0ae5, 0x0b88, 0x0c34, 0x0cea, 0x0da8, 0x0e70,
|
||||
};
|
||||
|
||||
const q7_t tanhTable_q7[256] = {
|
||||
0x00, 0x08, 0x10, 0x18, 0x1f, 0x27, 0x2e, 0x35, 0x3b, 0x41, 0x47, 0x4c, 0x51, 0x56, 0x5a, 0x5e, 0x61, 0x65, 0x68,
|
||||
0x6a, 0x6d, 0x6f, 0x71, 0x72, 0x74, 0x75, 0x76, 0x78, 0x78, 0x79, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7e,
|
||||
0x7e, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
||||
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x80,
|
||||
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
|
||||
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
|
||||
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
|
||||
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81,
|
||||
0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, 0x85, 0x85, 0x86, 0x87,
|
||||
0x88, 0x88, 0x8a, 0x8b, 0x8c, 0x8e, 0x8f, 0x91, 0x93, 0x96, 0x98, 0x9b, 0x9f, 0xa2, 0xa6, 0xaa, 0xaf, 0xb4, 0xb9,
|
||||
0xbf, 0xc5, 0xcb, 0xd2, 0xd9, 0xe1, 0xe8, 0xf0, 0xf8,
|
||||
};
|
||||
|
||||
const q15_t tanhTable_q15[256] = {
|
||||
0x0000, 0x07fd, 0x0feb, 0x17b9, 0x1f59, 0x26bf, 0x2ddf, 0x34ae, 0x3b27, 0x4142, 0x46fd, 0x4c56, 0x514d, 0x55e2,
|
||||
0x5a1a, 0x5df6, 0x617c, 0x64b0, 0x6797, 0x6a37, 0x6c95, 0x6eb5, 0x709e, 0x7254, 0x73dc, 0x753a, 0x7672, 0x7788,
|
||||
0x787f, 0x795b, 0x7a1e, 0x7acb, 0x7b65, 0x7bee, 0x7c66, 0x7cd1, 0x7d30, 0x7d84, 0x7dce, 0x7e0f, 0x7e49, 0x7e7d,
|
||||
0x7eaa, 0x7ed2, 0x7ef5, 0x7f14, 0x7f30, 0x7f48, 0x7f5e, 0x7f71, 0x7f82, 0x7f91, 0x7f9e, 0x7fa9, 0x7fb3, 0x7fbc,
|
||||
0x7fc4, 0x7fcb, 0x7fd1, 0x7fd7, 0x7fdc, 0x7fe0, 0x7fe4, 0x7fe7, 0x7fea, 0x7fed, 0x7fef, 0x7ff1, 0x7ff3, 0x7ff4,
|
||||
0x7ff6, 0x7ff7, 0x7ff8, 0x7ff9, 0x7ffa, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffc, 0x7ffd, 0x7ffd, 0x7ffd, 0x7ffe, 0x7ffe,
|
||||
0x7ffe, 0x7ffe, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff,
|
||||
0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff,
|
||||
0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff,
|
||||
0x7fff, 0x7fff, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
|
||||
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
|
||||
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001,
|
||||
0x8001, 0x8001, 0x8001, 0x8002, 0x8002, 0x8002, 0x8002, 0x8003, 0x8003, 0x8003, 0x8004, 0x8004, 0x8005, 0x8006,
|
||||
0x8006, 0x8007, 0x8008, 0x8009, 0x800a, 0x800c, 0x800d, 0x800f, 0x8011, 0x8013, 0x8016, 0x8019, 0x801c, 0x8020,
|
||||
0x8024, 0x8029, 0x802f, 0x8035, 0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, 0x80a2, 0x80b8,
|
||||
0x80d0, 0x80ec, 0x810b, 0x812e, 0x8156, 0x8183, 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412,
|
||||
0x849b, 0x8535, 0x85e2, 0x86a5, 0x8781, 0x8878, 0x898e, 0x8ac6, 0x8c24, 0x8dac, 0x8f62, 0x914b, 0x936b, 0x95c9,
|
||||
0x9869, 0x9b50, 0x9e84, 0xa20a, 0xa5e6, 0xaa1e, 0xaeb3, 0xb3aa, 0xb903, 0xbebe, 0xc4d9, 0xcb52, 0xd221, 0xd941,
|
||||
0xe0a7, 0xe847, 0xf015, 0xf803,
|
||||
};
|
||||
|
||||
const q15_t tanhLTable_q15[128] = {
|
||||
0x0000, 0x0400, 0x07fd, 0x0bf7, 0x0feb, 0x13d7, 0x17b9, 0x1b90, 0x1f59, 0x2314, 0x26bf, 0x2a58, 0x2ddf,
|
||||
0x3151, 0x34ae, 0x37f6, 0x3b27, 0x3e40, 0x4142, 0x442c, 0x46fd, 0x49b6, 0x4c56, 0x4edd, 0x514d, 0x53a3,
|
||||
0x55e2, 0x580a, 0x5a1a, 0x5c13, 0x5df6, 0x5fc4, 0x617c, 0x6320, 0x64b0, 0x662d, 0x6797, 0x68f0, 0x6a37,
|
||||
0x6b6e, 0x6c95, 0x6dac, 0x6eb5, 0x6fb0, 0x709e, 0x717f, 0x7254, 0x731e, 0x73dc, 0x7490, 0x753a, 0x75da,
|
||||
0x7672, 0x7701, 0x7788, 0x7807, 0x787f, 0x78f0, 0x795b, 0x79bf, 0x7a1e, 0x7a77, 0x7acb, 0x7b1b, 0x849b,
|
||||
0x84e5, 0x8535, 0x8589, 0x85e2, 0x8641, 0x86a5, 0x8710, 0x8781, 0x87f9, 0x8878, 0x88ff, 0x898e, 0x8a26,
|
||||
0x8ac6, 0x8b70, 0x8c24, 0x8ce2, 0x8dac, 0x8e81, 0x8f62, 0x9050, 0x914b, 0x9254, 0x936b, 0x9492, 0x95c9,
|
||||
0x9710, 0x9869, 0x99d3, 0x9b50, 0x9ce0, 0x9e84, 0xa03c, 0xa20a, 0xa3ed, 0xa5e6, 0xa7f6, 0xaa1e, 0xac5d,
|
||||
0xaeb3, 0xb123, 0xb3aa, 0xb64a, 0xb903, 0xbbd4, 0xbebe, 0xc1c0, 0xc4d9, 0xc80a, 0xcb52, 0xceaf, 0xd221,
|
||||
0xd5a8, 0xd941, 0xdcec, 0xe0a7, 0xe470, 0xe847, 0xec29, 0xf015, 0xf409, 0xf803, 0xfc00,
|
||||
};
|
||||
|
||||
const q15_t tanhHTable_q15[192] = {
|
||||
0x7b65, 0x7bee, 0x7c66, 0x7cd1, 0x7d30, 0x7d84, 0x7dce, 0x7e0f, 0x7e49, 0x7e7d, 0x7eaa, 0x7ed2, 0x7ef5, 0x7f14,
|
||||
0x7f30, 0x7f48, 0x7f5e, 0x7f71, 0x7f82, 0x7f91, 0x7f9e, 0x7fa9, 0x7fb3, 0x7fbc, 0x7fc4, 0x7fcb, 0x7fd1, 0x7fd7,
|
||||
0x7fdc, 0x7fe0, 0x7fe4, 0x7fe7, 0x7fea, 0x7fed, 0x7fef, 0x7ff1, 0x7ff3, 0x7ff4, 0x7ff6, 0x7ff7, 0x7ff8, 0x7ff9,
|
||||
0x7ffa, 0x7ffa, 0x7ffb, 0x7ffc, 0x7ffc, 0x7ffd, 0x7ffd, 0x7ffd, 0x7ffe, 0x7ffe, 0x7ffe, 0x7ffe, 0x7fff, 0x7fff,
|
||||
0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff,
|
||||
0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff,
|
||||
0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x8000, 0x8000,
|
||||
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
|
||||
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
|
||||
0x8000, 0x8000, 0x8000, 0x8000, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8002,
|
||||
0x8002, 0x8002, 0x8002, 0x8003, 0x8003, 0x8003, 0x8004, 0x8004, 0x8005, 0x8006, 0x8006, 0x8007, 0x8008, 0x8009,
|
||||
0x800a, 0x800c, 0x800d, 0x800f, 0x8011, 0x8013, 0x8016, 0x8019, 0x801c, 0x8020, 0x8024, 0x8029, 0x802f, 0x8035,
|
||||
0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, 0x80a2, 0x80b8, 0x80d0, 0x80ec, 0x810b, 0x812e,
|
||||
0x8156, 0x8183, 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412,
|
||||
};
|
|
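A lookup sketch for the unified 256-entry tables above; this is an assumption about how they are meant to be indexed (the two's-complement byte of a Q3.4 input used directly as the table index), and it relies on the table symbols being exposed through a header such as arm_nn_tables.h.

#include <stdint.h>
#include "arm_nn_tables.h"

/* Sigmoid of a Q3.4 input (range [-8, 8)), returned as a Q0.7 value. */
static q7_t sigmoid_q7_lookup(q7_t in_q3_4)
{
    return sigmoidTable_q7[(uint8_t)in_q3_4];
}

/* Example: sigmoid_q7_lookup(0) == 0x40, i.e. sigmoid(0) = 0.5 in Q0.7. */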
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_q7_to_q15_no_shift.c
|
||||
* Description: Converts the elements of the Q7 vector to Q15 vector without left-shift
|
||||
*
|
||||
* $Date: May 29, 2020
|
||||
* $Revision: V.1.0.2
|
||||
*
|
||||
* Target Processor: Cortex-M cores
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup nndata_convert
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Converts the elements of the Q7 vector to Q15 vector without left-shift
|
||||
* @param[in] *pSrc points to the Q7 input vector
|
||||
* @param[out] *pDst points to the Q15 output vector
|
||||
* @param[in] blockSize length of the input vector
|
||||
*
|
||||
* \par Description:
|
||||
*
|
||||
* The equation used for the conversion process is:
|
||||
*
|
||||
* <pre>
|
||||
* pDst[n] = (q15_t) pSrc[n]; 0 <= n < blockSize.
|
||||
* </pre>
|
||||
*
|
||||
*/
|
||||
|
||||
void arm_q7_to_q15_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize)
|
||||
{
|
||||
const q7_t *pIn = pSrc;
|
||||
uint32_t blkCnt;
|
||||
|
||||
#if defined(ARM_MATH_DSP)
|
||||
q31_t in;
|
||||
q31_t in1, in2;
|
||||
q31_t out1, out2;
|
||||
|
||||
/*loop Unrolling */
|
||||
blkCnt = blockSize >> 2u;
|
||||
|
||||
/* First part of the processing with loop unrolling. Compute 4 outputs at a time. */
|
||||
while (blkCnt > 0u)
|
||||
{
|
||||
in = arm_nn_read_q7x4_ia(&pIn);
|
||||
|
||||
/* rotate in by 8 and extend two q7_t values to q15_t values */
|
||||
in1 = __SXTB16(__ROR((uint32_t)in, 8));
|
||||
|
||||
/* extend remaining two q7_t values to q15_t values */
|
||||
in2 = __SXTB16(in);
|
||||
|
||||
#ifndef ARM_MATH_BIG_ENDIAN
|
||||
out2 = (int32_t)__PKHTB(in1, in2, 16);
|
||||
out1 = (int32_t)__PKHBT(in2, in1, 16);
|
||||
#else
|
||||
out1 = (int32_t)__PKHTB(in1, in2, 16);
|
||||
out2 = (int32_t)__PKHBT(in2, in1, 16);
|
||||
#endif
|
||||
arm_nn_write_q15x2_ia(&pDst, out1);
|
||||
arm_nn_write_q15x2_ia(&pDst, out2);
|
||||
|
||||
/* Decrement the loop counter */
|
||||
blkCnt--;
|
||||
}
|
||||
|
||||
/* If the blockSize is not a multiple of 4, compute any remaining output samples here.
|
||||
** No loop unrolling is used. */
|
||||
blkCnt = blockSize % 0x4u;
|
||||
|
||||
#else
|
||||
|
||||
/* Run the below code for Cortex-M0 */
|
||||
|
||||
/* Loop over blockSize number of values */
|
||||
blkCnt = blockSize;
|
||||
|
||||
#endif /* defined(ARM_MATH_DSP) */
|
||||
|
||||
while (blkCnt > 0u)
|
||||
{
|
||||
/* convert from q7 to q15 and then store the results in the destination buffer */
|
||||
*pDst++ = (q15_t)*pIn++;
|
||||
|
||||
/* Decrement the loop counter */
|
||||
blkCnt--;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of nndata_convert group
|
||||
*/
|
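A short usage sketch for the conversion above (illustrative values only), assuming the prototype is available via arm_nnsupportfunctions.h:

#include "arm_nnsupportfunctions.h"

void example_q7_to_q15_no_shift(void)
{
    const q7_t in[5] = {1, -1, 127, -128, 0};
    q15_t out[5];

    arm_q7_to_q15_no_shift(in, out, 5);
    /* out == {1, -1, 127, -128, 0}: each value is sign-extended to 16 bits. */
}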
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_q7_to_q15_reordered_no_shift.c
|
||||
* Description: Converts the elements of the Q7 vector to reordered Q15 vector without left-shift
|
||||
*
|
||||
* $Date: July 20, 2021
|
||||
* $Revision: V.1.1.1
|
||||
*
|
||||
* Target Processor: Cortex-M cores
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup nndata_convert
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Converts the elements of the Q7 vector to reordered Q15 vector without left-shift
|
||||
* @param[in] *pSrc points to the Q7 input vector
|
||||
* @param[out] *pDst points to the Q15 output vector
|
||||
* @param[in] blockSize length of the input vector
|
||||
*
|
||||
* @details
|
||||
*
|
||||
* This function does the q7 to q15 expansion with re-ordering
|
||||
*
|
||||
* <pre>
|
||||
* | A1 | A2 | A3 | A4 |
|
||||
*
|
||||
* 0 7 8 15 16 23 24 31
|
||||
* </pre>
|
||||
*
|
||||
* is converted into:
|
||||
*
|
||||
* <pre>
|
||||
* | A1 | A3 | and | A2 | A4 |
|
||||
*
|
||||
* 0 15 16 31 0 15 16 31
|
||||
* </pre>
|
||||
*
|
||||
*
|
||||
* This looks strange but is natural considering how sign-extension is done at
|
||||
* assembly level.
|
||||
*
|
||||
* The expansion of the other operand will follow the same rule so that the end
|
||||
* results are the same.
|
||||
*
|
||||
* The tail (i.e., last (N % 4) elements) will still be in original order.
|
||||
*
|
||||
*/
|
||||
|
||||
void arm_q7_to_q15_reordered_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize)
|
||||
{
|
||||
const q7_t *pIn = pSrc; /* Src pointer */
|
||||
uint32_t blkCnt; /* loop counter */
|
||||
|
||||
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
|
||||
q31_t in;
|
||||
q31_t in1, in2;
|
||||
|
||||
/* Run the below code for Cortex-M4 and Cortex-M3 */
|
||||
|
||||
/*loop Unrolling */
|
||||
blkCnt = blockSize >> 2u;
|
||||
|
||||
/* First part of the processing with loop unrolling. Compute 4 outputs at a time.
|
||||
** a second loop below computes the remaining 1 to 3 samples. */
|
||||
while (blkCnt > 0u)
|
||||
{
|
||||
/* C = (q15_t) A */
|
||||
/* convert from q7 to q15 and then store the results in the destination buffer */
|
||||
in = arm_nn_read_q7x4_ia(&pIn);
|
||||
|
||||
/* rotate in by 8 and extend two q7_t values to q15_t values */
|
||||
in1 = __SXTB16(__ROR((uint32_t)in, 8));
|
||||
|
||||
/* extend remaining two q7_t values to q15_t values */
|
||||
in2 = __SXTB16(in);
|
||||
|
||||
#ifndef ARM_MATH_BIG_ENDIAN
|
||||
arm_nn_write_q7x4_ia((q7_t **)&pDst, in2);
|
||||
arm_nn_write_q7x4_ia((q7_t **)&pDst, in1);
|
||||
#else
|
||||
arm_nn_write_q7x4_ia((q7_t **)&pDst, in1);
|
||||
arm_nn_write_q7x4_ia((q7_t **)&pDst, in2);
|
||||
#endif
|
||||
|
||||
/* Decrement the loop counter */
|
||||
blkCnt--;
|
||||
}
|
||||
|
||||
/* If the blockSize is not a multiple of 4, compute any remaining output samples here.
|
||||
** No loop unrolling is used. */
|
||||
blkCnt = blockSize % 0x4u;
|
||||
|
||||
#else
|
||||
|
||||
/* Run the below code for Cortex-M0 */
|
||||
|
||||
/* Loop over blockSize number of values */
|
||||
blkCnt = blockSize;
|
||||
|
||||
#endif /* defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) */
|
||||
|
||||
while (blkCnt > 0u)
|
||||
{
|
||||
/* C = (q15_t) A */
|
||||
/* convert from q7 to q15 and then store the results in the destination buffer */
|
||||
*pDst++ = (q15_t)*pIn++;
|
||||
|
||||
/* Decrement the loop counter */
|
||||
blkCnt--;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of q7_to_x group
|
||||
*/
|
|
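As a reference for the re-ordering described above, the portable model below (an illustration, not the DSP code path) shows where each q7 input ends up on a little-endian target: every aligned group of four inputs {A1, A2, A3, A4} is written out as {A1, A3, A2, A4}, and the tail stays in order.

#include <stdint.h>

static void q7_to_q15_reordered_model(const int8_t *src, int16_t *dst, uint32_t n)
{
    uint32_t i = 0;
    for (; i + 4 <= n; i += 4)
    {
        dst[i + 0] = src[i + 0]; /* A1 */
        dst[i + 1] = src[i + 2]; /* A3 */
        dst[i + 2] = src[i + 1]; /* A2 */
        dst[i + 3] = src[i + 3]; /* A4 */
    }
    for (; i < n; i++) /* last (n % 4) elements keep their original order */
    {
        dst[i] = src[i];
    }
}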
@ -0,0 +1,100 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
* Title: arm_q7_to_q15_reordered_with_offset.c
|
||||
* Description: Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. The re-ordering
|
||||
* is a consequence of the sign-extension intrinsics (DSP extension).
|
||||
*
|
||||
* $Date: May 29, 2020
|
||||
* $Revision: V.2.0.3
|
||||
*
|
||||
* Target Processor: Cortex-M cores
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_nnsupportfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupSupport
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup nndata_convert
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset.
|
||||
*
|
||||
* @note Refer to the header file for details.
|
||||
*
|
||||
*/
|
||||
|
||||
void arm_q7_to_q15_reordered_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset)
|
||||
{
|
||||
|
||||
#if defined(ARM_MATH_DSP)
|
||||
uint32_t block_cnt;
|
||||
/* Run the below code for cores that support SIMD instructions */
|
||||
q31_t in_q7x4;
|
||||
q31_t out_q15x2_1;
|
||||
q31_t out_q15x2_2;
|
||||
|
||||
/*loop unrolling */
|
||||
block_cnt = block_size >> 2u;
|
||||
|
||||
/* First part of the processing with loop unrolling. Compute 4 outputs at a time. */
|
||||
const q31_t offset_q15x2 = (q31_t)__PKHBT(offset, offset, 16);
|
||||
while (block_cnt > 0u)
|
||||
{
|
||||
/* convert from q7 to q15 and then store the results in the destination buffer */
|
||||
in_q7x4 = arm_nn_read_q7x4_ia(&src);
|
||||
|
||||
/* Extract and sign extend each of the four q7 values to q15 */
|
||||
out_q15x2_1 = __SXTAB16(offset_q15x2, __ROR((uint32_t)in_q7x4, 8));
|
||||
out_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4);
|
||||
|
||||
arm_nn_write_q15x2_ia(&dst, out_q15x2_2);
|
||||
arm_nn_write_q15x2_ia(&dst, out_q15x2_1);
|
||||
|
||||
block_cnt--;
|
||||
}
|
||||
/* Handle left over samples */
|
||||
block_cnt = block_size % 0x4u;
|
||||
|
||||
while (block_cnt > 0u)
|
||||
{
|
||||
*dst++ = (q15_t)*src++ + offset;
|
||||
|
||||
/* Decrement the loop counter */
|
||||
block_cnt--;
|
||||
}
|
||||
#else
|
||||
(void)src;
|
||||
(void)dst;
|
||||
(void)block_size;
|
||||
(void)offset;
|
||||
/* Not available */
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of nndata_convert group
|
||||
*/
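
/*
 * Usage sketch (editorial illustration, assuming a build with ARM_MATH_DSP
 * defined so the routine is implemented): widen a small q7 buffer to q15 in
 * the reordered layout. The function name, buffer sizes and offset value
 * below are arbitrary placeholders.
 */
#if defined(ARM_MATH_DSP)
static void reordered_with_offset_usage_example(void)
{
    static const q7_t input[8] = {-128, -1, 0, 1, 2, 3, 4, 127};
    q15_t output[8];

    /* Add an offset while widening, e.g. an input offset from an s8 quantized layer. */
    arm_q7_to_q15_reordered_with_offset(input, output, 8, 128);
}
#endif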
@@ -0,0 +1,114 @@
/*
 * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project: CMSIS NN Library
 * Title: arm_q7_to_q15_with_offset.c
 * Description: Converts the elements of a Q7 vector to a Q15 vector with an added offset
 *
 * $Date: March 3, 2020
 * $Revision: V.2.0.2
 *
 * Target Processor: Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupSupport
 */

/**
 * @addtogroup nndata_convert
 * @{
 */

void arm_q7_to_q15_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, q15_t offset)
{
    int block_cnt;

#if defined(ARM_MATH_MVEI)

    int16x8_t source;
    const int16x8_t source_offset = vdupq_n_s16(offset);
    block_cnt = block_size / 8;

    while (block_cnt > 0)
    {
        source = vldrbq_s16(src);
        source = vaddq_s16(source, source_offset);
        vstrhq_s16(dst, source);
        dst += 8;
        src += 8;
        block_cnt--;
    }

    block_cnt = block_size & 0x7;

#elif defined(ARM_MATH_DSP)
    /* Run the below code for cores that support SIMD instructions */
    q31_t in_q7x4;
    q31_t in_q15x2_1;
    q31_t in_q15x2_2;
    q31_t out_q15x2_1;
    q31_t out_q15x2_2;

    /* Loop unrolling */
    block_cnt = block_size >> 2;

    /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */
    const q31_t offset_q15x2 = __PKHBT(offset, offset, 16);
    while (block_cnt > 0)
    {
        /* convert from q7 to q15 and then store the results in the destination buffer */
        in_q7x4 = arm_nn_read_q7x4_ia(&src);

        /* Extract and sign extend each of the four q7 values to q15 */
        in_q15x2_1 = __SXTAB16(offset_q15x2, __ROR(in_q7x4, 8));
        in_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4);

        out_q15x2_2 = __PKHTB(in_q15x2_1, in_q15x2_2, 16);
        out_q15x2_1 = __PKHBT(in_q15x2_2, in_q15x2_1, 16);

        arm_nn_write_q15x2_ia(&dst, out_q15x2_1);
        arm_nn_write_q15x2_ia(&dst, out_q15x2_2);

        block_cnt--;
    }
    /* Handle left over samples */
    block_cnt = block_size % 0x4;

#else
    /* Run the below code for Cortex-M0 */
    /* Loop over block_size number of values */
    block_cnt = block_size;
#endif

    while (block_cnt > 0)
    {
        *dst++ = (q15_t)*src++ + offset;

        /* Decrement the loop counter */
        block_cnt--;
    }
}

/**
 * @} end of nndata_convert group
 */
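
/*
 * Usage sketch (editorial illustration): widen an s8 activation buffer to q15
 * while folding in an input offset, e.g. ahead of a kernel that operates on
 * q15 data. The function name, buffer contents and offset value below are
 * arbitrary placeholders.
 */
static void q7_to_q15_with_offset_usage_example(void)
{
    static const q7_t activations[6] = {-3, -2, -1, 0, 1, 2};
    q15_t widened[6];
    const q15_t input_offset = 5; /* placeholder for a layer's input offset */

    arm_q7_to_q15_with_offset(activations, widened, 6, input_offset);
    /* Afterwards, widened[i] == (q15_t)activations[i] + input_offset for each element. */
}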