CMSIS-DSP: New MVE implementations of the complex dot product and vector product.

pull/19/head
Christophe Favergeon 5 years ago
parent c4283d209f
commit 8fff9ebe29

@@ -86,42 +86,92 @@ void arm_cmplx_dot_prod_f16(
        float16_t * realResult,
        float16_t * imagResult)
{
    int32_t blkCnt;
    float16_t real_sum, imag_sum;
    f16x8_t vecSrcA, vecSrcB;
    f16x8_t vec_acc = vdupq_n_f16(0.0f16);
    f16x8_t vecSrcC, vecSrcD;

    blkCnt = (numSamples >> 3);
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 8;
        pSrcB += 8;

        while (blkCnt > 0) {
            vec_acc = vcmlaq(vec_acc, vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 8;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 8;

            vec_acc = vcmlaq(vec_acc, vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 8;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vec_acc = vcmlaq(vec_acc, vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);

        vec_acc = vcmlaq(vec_acc, vecSrcC, vecSrcD);
        vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 7);
        while (blkCnt > 0) {
            mve_pred16_t p = vctp16q(blkCnt);
            pSrcA += 8;
            pSrcB += 8;

            vecSrcA = vldrhq_z_f16(pSrcA, p);
            vecSrcB = vldrhq_z_f16(pSrcB, p);
            vec_acc = vcmlaq_m(vec_acc, vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            blkCnt -= 8;
        }
    } else {
        /* small vector */
        blkCnt = numSamples * CMPLX_DIM;
        vec_acc = vdupq_n_f16(0.0f16);

        do {
            mve_pred16_t p = vctp16q(blkCnt);

            vecSrcA = vldrhq_z_f16(pSrcA, p);
            vecSrcB = vldrhq_z_f16(pSrcB, p);

            vec_acc = vcmlaq_m(vec_acc, vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 8;
            pSrcB += 8;
            blkCnt -= 8;
        }
        while (blkCnt > 0);
    }
    /* Sum the partial parts */
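For readers skimming the diff: the vcmlaq/vcmlaq_rot90 pair above performs one full complex multiply-accumulate per (even, odd) lane pair. A minimal scalar model of the two rotations, for illustration only (cmla_model is a hypothetical helper, not library code):

    /* Scalar model of one lane pair (2k = real, 2k + 1 = imaginary). */
    static void cmla_model(float acc[2], const float a[2], const float b[2])
    {
        /* vcmlaq (rotation 0):  acc.r += a.r * b.r;  acc.i += a.r * b.i */
        acc[0] += a[0] * b[0];
        acc[1] += a[0] * b[1];

        /* vcmlaq_rot90:         acc.r -= a.i * b.i;  acc.i += a.i * b.r */
        acc[0] -= a[1] * b[1];
        acc[1] += a[1] * b[0];

        /* Net effect: acc += a * b, so even lanes accumulate the real part
           and odd lanes the imaginary part of the dot product. */
    }

This is also why the final reduction in these kernels sums the even lanes into real_sum and the odd lanes into imag_sum.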

@@ -83,56 +83,94 @@ void arm_cmplx_dot_prod_f32(
        float32_t * realResult,
        float32_t * imagResult)
{
    int32_t blkCnt;
    float32_t real_sum, imag_sum;
    f32x4_t vecSrcA, vecSrcB;
    f32x4_t vec_acc = vdupq_n_f32(0.0f);
    f32x4_t vecSrcC, vecSrcD;

    blkCnt = numSamples >> 2;
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 4;
        pSrcB += 4;

        while (blkCnt > 0) {
            vec_acc = vcmlaq(vec_acc, vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 4;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 4;

            vec_acc = vcmlaq(vec_acc, vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 4;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vec_acc = vcmlaq(vec_acc, vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);

        vec_acc = vcmlaq(vec_acc, vecSrcC, vecSrcD);
        vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 3);
        while (blkCnt > 0) {
            mve_pred16_t p = vctp32q(blkCnt);
            pSrcA += 4;
            pSrcB += 4;

            vecSrcA = vldrwq_z_f32(pSrcA, p);
            vecSrcB = vldrwq_z_f32(pSrcB, p);
            vec_acc = vcmlaq_m(vec_acc, vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            blkCnt -= 4;
        }
    } else {
        /* small vector */
        blkCnt = numSamples * CMPLX_DIM;
        vec_acc = vdupq_n_f32(0.0f);

        do {
            mve_pred16_t p = vctp32q(blkCnt);

            vecSrcA = vldrwq_z_f32(pSrcA, p);
            vecSrcB = vldrwq_z_f32(pSrcB, p);

            vec_acc = vcmlaq_m(vec_acc, vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 4;
            pSrcB += 4;
            blkCnt -= 4;
        }
        while (blkCnt > 0);
    }
    real_sum = vgetq_lane(vec_acc, 0) + vgetq_lane(vec_acc, 2);
    imag_sum = vgetq_lane(vec_acc, 1) + vgetq_lane(vec_acc, 3);

    /*
     * Store the real and imaginary results in the destination buffers
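The tails above rely on MVE tail predication instead of a scalar cleanup loop. A minimal sketch of the mechanism, assuming an MVE target and arm_mve.h (sum_f32_predicated is a hypothetical example, not part of CMSIS-DSP):

    #include <arm_mve.h>

    float32_t sum_f32_predicated(const float32_t *p, int32_t n)
    {
        float32x4_t acc = vdupq_n_f32(0.0f);

        while (n > 0) {
            /* vctp32q(n) enables only the low min(n, 4) 32-bit lanes. */
            mve_pred16_t pred = vctp32q(n);
            /* Inactive lanes of a z-load read as zero and touch no memory. */
            float32x4_t v = vldrwq_z_f32(p, pred);
            /* Inactive lanes of acc are left unchanged by the _m form. */
            acc = vaddq_m(acc, acc, v, pred);
            p += 4;
            n -= 4;
        }
        return vgetq_lane(acc, 0) + vgetq_lane(acc, 1)
             + vgetq_lane(acc, 2) + vgetq_lane(acc, 3);
    }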

@@ -62,27 +62,22 @@ void arm_cmplx_dot_prod_q15(
        q31_t * realResult,
        q31_t * imagResult)
{
    int32_t blkCnt;
    q63_t accReal = 0LL;
    q63_t accImag = 0LL;
    q15x8_t vecSrcA, vecSrcB;
    q15x8_t vecSrcC, vecSrcD;

    blkCnt = (numSamples >> 3);
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 8;
        pSrcB += 8;

        while (blkCnt > 0) {
            accReal = vmlsldavaq(accReal, vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);

@@ -105,33 +100,60 @@ void arm_cmplx_dot_prod_q15(
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        accReal = vmlsldavaq(accReal, vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        accImag = vmlaldavaxq(accImag, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);

        accReal = vmlsldavaq(accReal, vecSrcC, vecSrcD);
        vecSrcA = vld1q(pSrcA);

        accImag = vmlaldavaxq(accImag, vecSrcC, vecSrcD);
        vecSrcB = vld1q(pSrcB);

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 7);
        do {
            mve_pred16_t p = vctp16q(blkCnt);
            pSrcA += 8;
            pSrcB += 8;

            vecSrcA = vldrhq_z_s16(pSrcA, p);
            vecSrcB = vldrhq_z_s16(pSrcB, p);
            accReal = vmlsldavaq_p(accReal, vecSrcA, vecSrcB, p);
            accImag = vmlaldavaxq_p(accImag, vecSrcA, vecSrcB, p);
            blkCnt -= 8;
        }
        while ((int32_t) blkCnt > 0);
    } else {
        blkCnt = numSamples * CMPLX_DIM;
        while (blkCnt > 0) {
            mve_pred16_t p = vctp16q(blkCnt);

            vecSrcA = vldrhq_z_s16(pSrcA, p);
            vecSrcB = vldrhq_z_s16(pSrcB, p);

            accReal = vmlsldavaq_p(accReal, vecSrcA, vecSrcB, p);
            accImag = vmlaldavaxq_p(accImag, vecSrcA, vecSrcB, p);
            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 8;
            pSrcB += 8;
            blkCnt -= 8;
        }
    }
    /* Store real and imaginary result in 8.24 format */
    /* Convert real data in 34.30 to 8.24 by 6 right shifts */
    *realResult = asrl(accReal, (14 - 8));
    /* Convert imaginary data in 34.30 to 8.24 by 6 right shifts */
    *imagResult = asrl(accImag, (14 - 8));
}
#else
void arm_cmplx_dot_prod_q15(
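On the fixed-point bookkeeping: vmlsldavaq/vmlaldavaxq accumulate Q15 x Q15 (Q30) products into 64-bit accumulators, i.e. a 34.30 value, and asrl(acc, 14 - 8) is the 6-bit shift down to the documented 8.24 result. A worked sketch with illustrative values (not library code):

    #include <stdint.h>

    int32_t q15_dot_scaling_demo(void)
    {
        int16_t a = 0x4000, b = 0x2000;      /* 0.5 and 0.25 in Q15        */
        int64_t acc_34_30 = (int64_t)a * b;  /* 0.125 in Q30, held as 34.30 */
        return (int32_t)(acc_34_30 >> 6);    /* 0.125 in 8.24               */
    }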

@@ -64,60 +64,99 @@ void arm_cmplx_dot_prod_q31(
        q63_t * realResult,
        q63_t * imagResult)
{
    int32_t blkCnt;
    q63_t accReal = 0LL;
    q63_t accImag = 0LL;
    q31x4_t vecSrcA, vecSrcB;
    q31x4_t vecSrcC, vecSrcD;

    blkCnt = numSamples >> 2;
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 4;
        pSrcB += 4;

        while (blkCnt > 0) {
            accReal = vrmlsldavhaq(accReal, vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 4;

            accImag = vrmlaldavhaxq(accImag, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 4;

            accReal = vrmlsldavhaq(accReal, vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 4;

            accImag = vrmlaldavhaxq(accImag, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        accReal = vrmlsldavhaq(accReal, vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        accImag = vrmlaldavhaxq(accImag, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);

        accReal = vrmlsldavhaq(accReal, vecSrcC, vecSrcD);
        vecSrcA = vld1q(pSrcA);

        accImag = vrmlaldavhaxq(accImag, vecSrcC, vecSrcD);
        vecSrcB = vld1q(pSrcB);

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 3);
        do {
            mve_pred16_t p = vctp32q(blkCnt);
            pSrcA += 4;
            pSrcB += 4;

            vecSrcA = vldrwq_z_s32(pSrcA, p);
            vecSrcB = vldrwq_z_s32(pSrcB, p);

            accReal = vrmlsldavhaq_p(accReal, vecSrcA, vecSrcB, p);
            accImag = vrmlaldavhaxq_p(accImag, vecSrcA, vecSrcB, p);
            blkCnt -= 4;
        }
        while ((int32_t) blkCnt > 0);
    } else {
        blkCnt = numSamples * CMPLX_DIM;
        while (blkCnt > 0) {
            mve_pred16_t p = vctp32q(blkCnt);

            vecSrcA = vldrwq_z_s32(pSrcA, p);
            vecSrcB = vldrwq_z_s32(pSrcB, p);

            accReal = vrmlsldavhaq_p(accReal, vecSrcA, vecSrcB, p);
            accImag = vrmlaldavhaxq_p(accImag, vecSrcA, vecSrcB, p);
            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 4;
            pSrcB += 4;
            blkCnt -= 4;
        }
    }
    /* Store real and imaginary result in destination buffer. */
    *realResult = asrl(accReal, (14 - 8));
    *imagResult = asrl(accImag, (14 - 8));
}
#else
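Every kernel in this commit uses the same software-pipelining skeleton: prime two vectors before the loop, interleave next-block loads with current-block arithmetic, and peel the final blocks into an explicit epilogue so armclang keeps the schedule. A stripped-down scalar illustration of that shape (pipelined_accumulate is hypothetical and assumes n >= 1):

    #include <stdint.h>

    int64_t pipelined_accumulate(const int32_t *p, int n /* n >= 1 blocks */)
    {
        int64_t acc = 0;
        int32_t cur, nxt;
        int     blkCnt = n - 1;      /* last block is handled by the epilogue */

        cur = *p++;                  /* prologue: prime the pipeline          */
        while (blkCnt > 0) {
            nxt = *p++;              /* load block i + 1 ...                  */
            acc += cur;              /* ... while consuming block i           */
            cur = nxt;
            blkCnt--;
        }
        acc += cur;                  /* epilogue: drain the pipeline          */
        return acc;
    }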

@@ -78,51 +78,105 @@ void arm_cmplx_mult_cmplx_f16(
        float16_t * pDst,
        uint32_t numSamples)
{
    int32_t blkCnt;
    f16x8_t vecSrcA, vecSrcB;
    f16x8_t vecSrcC, vecSrcD;
    f16x8_t vec_acc;

    blkCnt = (numSamples >> 3);
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 8;
        pSrcB += 8;

        while (blkCnt > 0) {
            /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
            vec_acc = vcmulq(vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 8;

            /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
            vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 8;
            vst1q(pDst, vec_acc);
            pDst += 8;

            vec_acc = vcmulq(vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 8;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 8;
            vst1q(pDst, vec_acc);
            pDst += 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vec_acc = vcmulq(vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);
        vst1q(pDst, vec_acc);
        pDst += 8;

        vec_acc = vcmulq(vecSrcC, vecSrcD);
        vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
        vst1q(pDst, vec_acc);
        pDst += 8;

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 7);
        while (blkCnt > 0) {
            mve_pred16_t p = vctp16q(blkCnt);
            pSrcA += 8;
            pSrcB += 8;

            vecSrcA = vldrhq_z_f16(pSrcA, p);
            vecSrcB = vldrhq_z_f16(pSrcB, p);

            vec_acc = vcmulq_m(vuninitializedq_f16(), vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            vstrhq_p_f16(pDst, vec_acc, p);
            pDst += 8;

            blkCnt -= 8;
        }
    } else {
        /* small vector */
        blkCnt = numSamples * CMPLX_DIM;

        do {
            mve_pred16_t p = vctp16q(blkCnt);

            vecSrcA = vldrhq_z_f16(pSrcA, p);
            vecSrcB = vldrhq_z_f16(pSrcB, p);

            vec_acc = vcmulq_m(vuninitializedq_f16(), vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            vstrhq_p_f16(pDst, vec_acc, p);
            pDst += 8;

            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 8;
            pSrcB += 8;
            blkCnt -= 8;
        }
        while (blkCnt > 0);
    }
}
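The predicated stores in the tails above are what keep memory beyond numSamples untouched, which is exactly what the checkInnerTail() helpers at the bottom of this commit assert. A minimal sketch of that behaviour (store_tail_f16 is a hypothetical example, arm_mve.h assumed):

    #include <arm_mve.h>

    void store_tail_f16(float16_t *dst, float16x8_t v, int32_t remaining)
    {
        /* Enable only the first `remaining` 16-bit lanes... */
        mve_pred16_t p = vctp16q(remaining);
        /* ...so nothing past dst[remaining - 1] is ever written. */
        vstrhq_p_f16(dst, v, p);
    }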

@@ -76,54 +76,104 @@ void arm_cmplx_mult_cmplx_f32(
        float32_t * pDst,
        uint32_t numSamples)
{
    int32_t blkCnt;
    f32x4_t vecSrcA, vecSrcB;
    f32x4_t vecSrcC, vecSrcD;
    f32x4_t vec_acc;

    blkCnt = numSamples >> 2;
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 4;
        pSrcB += 4;

        while (blkCnt > 0) {
            /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
            vec_acc = vcmulq(vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 4;

            /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
            vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 4;
            vst1q(pDst, vec_acc);
            pDst += 4;

            vec_acc = vcmulq(vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 4;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 4;
            vst1q(pDst, vec_acc);
            pDst += 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vec_acc = vcmulq(vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);
        vst1q(pDst, vec_acc);
        pDst += 4;

        vec_acc = vcmulq(vecSrcC, vecSrcD);
        vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
        vst1q(pDst, vec_acc);
        pDst += 4;

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 3);
        while (blkCnt > 0) {
            mve_pred16_t p = vctp32q(blkCnt);
            pSrcA += 4;
            pSrcB += 4;

            vecSrcA = vldrwq_z_f32(pSrcA, p);
            vecSrcB = vldrwq_z_f32(pSrcB, p);

            vec_acc = vcmulq_m(vuninitializedq_f32(), vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            vstrwq_p_f32(pDst, vec_acc, p);
            pDst += 4;

            blkCnt -= 4;
        }
    } else {
        /* small vector */
        blkCnt = numSamples * CMPLX_DIM;
        vec_acc = vdupq_n_f32(0.0f);

        do {
            mve_pred16_t p = vctp32q(blkCnt);

            vecSrcA = vldrwq_z_f32(pSrcA, p);
            vecSrcB = vldrwq_z_f32(pSrcB, p);

            vec_acc = vcmulq_m(vuninitializedq_f32(), vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            vstrwq_p_f32(pDst, vec_acc, p);
            pDst += 4;

            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 4;
            pSrcB += 4;
            blkCnt -= 4;
        }
        while (blkCnt > 0);
    }
}
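From the caller's side nothing changes; the usual interleaved-buffer usage still applies. A small, hypothetical example for the f32 variant:

    #include "arm_math.h"

    void cmplx_mult_example(void)
    {
        /* 4 complex samples, stored as [re0, im0, re1, im1, ...] */
        float32_t a[8] = { 1.0f, 0.0f,  0.0f, 1.0f,  1.0f,  1.0f,  2.0f, -1.0f };
        float32_t b[8] = { 1.0f, 0.0f,  0.0f, 1.0f,  1.0f, -1.0f,  0.5f,  0.5f };
        float32_t c[8];

        arm_cmplx_mult_cmplx_f32(a, b, c, 4);
        /* e.g. sample 1: (0 + 1j) * (0 + 1j) = -1 + 0j -> c[2] == -1, c[3] == 0 */
    }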

@@ -57,54 +57,116 @@ void arm_cmplx_mult_cmplx_q15(
        q15_t * pDst,
        uint32_t numSamples)
{
    int32_t blkCnt;
    q15x8_t vecSrcA, vecSrcB;
    q15x8_t vecSrcC, vecSrcD;
    q15x8_t vecDst;

    blkCnt = (numSamples >> 3);
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 8;
        pSrcB += 8;

        while (blkCnt > 0) {
            /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
            vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 8;

            /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
            vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 8;

            vstrhq_s16(pDst, vshrq(vecDst, 2));
            pDst += 8;

            vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 8;

            vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 8;

            vstrhq_s16(pDst, vshrq(vecDst, 2));
            pDst += 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);

        vstrhq_s16(pDst, vshrq(vecDst, 2));
        pDst += 8;

        vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcC, vecSrcD);
        vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);

        vstrhq_s16(pDst, vshrq(vecDst, 2));
        pDst += 8;

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 7);
        do {
            mve_pred16_t p = vctp16q(blkCnt);
            pSrcA += 8;
            pSrcB += 8;

            vecSrcA = vldrhq_z_s16(pSrcA, p);
            vecSrcB = vldrhq_z_s16(pSrcB, p);

            vecDst = vqdmlsdhq_m(vuninitializedq_s16(), vecSrcA, vecSrcB, p);
            vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
            vecDst = vshrq_m(vuninitializedq_s16(), vecDst, 2, p);
            vstrhq_p_s16(pDst, vecDst, p);
            pDst += 8;

            blkCnt -= 8;
        }
        while ((int32_t) blkCnt > 0);
    } else {
        blkCnt = numSamples * CMPLX_DIM;
        while (blkCnt > 0) {
            mve_pred16_t p = vctp16q(blkCnt);

            vecSrcA = vldrhq_z_s16(pSrcA, p);
            vecSrcB = vldrhq_z_s16(pSrcB, p);

            vecDst = vqdmlsdhq_m(vuninitializedq_s16(), vecSrcA, vecSrcB, p);
            vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
            vecDst = vshrq_m(vuninitializedq_s16(), vecDst, 2, p);
            vstrhq_p_s16(pDst, vecDst, p);
            pDst += 8;
            pSrcA += 8;
            pSrcB += 8;
            blkCnt -= 8;
        }
    }
}
#else
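The vector path's vshrq(vecDst, 2) reproduces the 3.13 output format of the scalar code it replaces; the scalar reference from the removed implementation makes the scaling explicit (shown here for comparison only):

    #include <stdint.h>
    typedef int16_t q15_t;
    typedef int32_t q31_t;

    /* Real part of one Q15 complex product in 3.13 format:
       Q15 x Q15 -> Q30, and >> 17 == (>> 15 back to Q15, then >> 2 to 3.13). */
    q15_t q15_cmplx_mult_real_ref(q15_t ar, q15_t ai, q15_t br, q15_t bi)
    {
        return (q15_t)((((q31_t)ar * br) >> 17) - (((q31_t)ai * bi) >> 17));
    }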

@@ -57,52 +57,111 @@ void arm_cmplx_mult_cmplx_q31(
        q31_t * pDst,
        uint32_t numSamples)
{
    int32_t blkCnt;
    q31x4_t vecSrcA, vecSrcB;
    q31x4_t vecSrcC, vecSrcD;
    q31x4_t vecDst;

    blkCnt = numSamples >> 2;
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 4;
        pSrcB += 4;

        while (blkCnt > 0) {
            /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
            vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 4;

            /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
            vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 4;

            vst1q(pDst, vshrq(vecDst, 2));
            pDst += 4;

            vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 4;

            vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 4;

            vst1q(pDst, vshrq(vecDst, 2));
            pDst += 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);

        vst1q(pDst, vshrq(vecDst, 2));
        pDst += 4;

        vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcC, vecSrcD);
        vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);

        vst1q(pDst, vshrq(vecDst, 2));
        pDst += 4;

        /*
         * tail
         */
        blkCnt = CMPLX_DIM * (numSamples & 3);
        do {
            mve_pred16_t p = vctp32q(blkCnt);
            pSrcA += 4;
            pSrcB += 4;

            vecSrcA = vldrwq_z_s32(pSrcA, p);
            vecSrcB = vldrwq_z_s32(pSrcB, p);

            vecDst = vqdmlsdhq_m(vuninitializedq_s32(), vecSrcA, vecSrcB, p);
            vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
            vecDst = vshrq_m(vuninitializedq_s32(), vecDst, 2, p);
            vstrwq_p_s32(pDst, vecDst, p);
            pDst += 4;

            blkCnt -= 4;
        }
        while ((int32_t) blkCnt > 0);
    } else {
        blkCnt = numSamples * CMPLX_DIM;
        while (blkCnt > 0) {
            mve_pred16_t p = vctp32q(blkCnt);

            vecSrcA = vldrwq_z_s32(pSrcA, p);
            vecSrcB = vldrwq_z_s32(pSrcB, p);

            vecDst = vqdmlsdhq_m(vuninitializedq_s32(), vecSrcA, vecSrcB, p);
            vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
            vecDst = vshrq_m(vuninitializedq_s32(), vecDst, 2, p);
            vstrwq_p_s32(pDst, vecDst, p);
            pDst += 4;
            pSrcA += 4;
            pSrcB += 4;
            blkCnt -= 4;
        }
    }
}
#else
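Same idea for the Q31 variant, which keeps the documented 3.29 output format; the scalar reference from the removed implementation, for comparison only:

    #include <stdint.h>
    typedef int32_t q31_t;
    typedef int64_t q63_t;

    /* Q31 x Q31 -> Q62, and >> 33 lands in 3.29, matching vshrq(vecDst, 2)
       applied to the doubling (vqdmlsdhq/vqdmladhxq) products. */
    q31_t q31_cmplx_mult_real_ref(q31_t ar, q31_t ai, q31_t br, q31_t bi)
    {
        return (q31_t)((((q63_t)ar * br) >> 33) - (((q63_t)ai * bi) >> 33));
    }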

@@ -11,7 +11,7 @@
 #define ABS_32x64_ERROR_Q31 ((q31_t)25)

-void checkInnerTail(q31_t *b)
+static void checkInnerTail(q31_t *b)
 {
     ASSERT_TRUE(b[0] == 0);
     ASSERT_TRUE(b[1] == 0);

@@ -16,7 +16,7 @@ a double precision computation.
 static __ALIGNED(8) float16_t coeffArray[32];
 #endif

-void checkInnerTail(float16_t *b)
+static void checkInnerTail(float16_t *b)
 {
     ASSERT_TRUE(b[0] == 0.0f);
     ASSERT_TRUE(b[1] == 0.0f);

@@ -16,7 +16,7 @@ a double precision computation.
 static __ALIGNED(8) float32_t coeffArray[32];
 #endif

-void checkInnerTail(float32_t *b)
+static void checkInnerTail(float32_t *b)
 {
     ASSERT_TRUE(b[0] == 0.0f);
     ASSERT_TRUE(b[1] == 0.0f);

@@ -10,7 +10,7 @@
 static __ALIGNED(8) q15_t coeffArray[32];
 #endif

-void checkInnerTail(q15_t *b)
+static void checkInnerTail(q15_t *b)
 {
     ASSERT_TRUE(b[0] == 0);
     ASSERT_TRUE(b[1] == 0);

@@ -10,7 +10,7 @@
 static __ALIGNED(8) q31_t coeffArray[32];
 #endif

-void checkInnerTail(q31_t *b)
+static void checkInnerTail(q31_t *b)
 {
     ASSERT_TRUE(b[0] == 0);
     ASSERT_TRUE(b[1] == 0);

@@ -10,7 +10,7 @@
 static __ALIGNED(8) q7_t coeffArray[32];
 #endif

-void checkInnerTail(q7_t *b)
+static void checkInnerTail(q7_t *b)
 {
     ASSERT_TRUE(b[0] == 0);
     ASSERT_TRUE(b[1] == 0);
