[libpng16] Fix compilation errors in filter_neon_intrinsics.c

Use filter_neon_intrinsics.c in preference to filter_neon.S when possible.
This commit is contained in:
John Bowler 2013-10-15 21:19:56 -05:00 committed by Glenn Randers-Pehrson
parent 85c2ec9a69
commit 83a841ab7c
4 changed files with 119 additions and 72 deletions

View File

@@ -81,12 +81,8 @@ libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = png.c pngerror.c\
png.h pngconf.h pngdebug.h pnginfo.h pngpriv.h pngstruct.h pngusr.dfa png.h pngconf.h pngdebug.h pnginfo.h pngpriv.h pngstruct.h pngusr.dfa
if PNG_ARM_NEON if PNG_ARM_NEON
libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += arm/arm_init.c \ libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += arm/arm_init.c\
if PNG_ARM_NEON_INTRINSICS arm/filter_neon.S arm/filter_neon_intrinsics.c
arm/filter_neon_intrinsics.c
else
arm/filter_neon.S
endif
endif endif
nodist_libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = pnglibconf.h nodist_libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = pnglibconf.h

View File

@@ -20,6 +20,12 @@
.section .note.GNU-stack,"",%progbits /* mark stack as non-executable */ .section .note.GNU-stack,"",%progbits /* mark stack as non-executable */
#endif #endif
/* Assembler NEON support - only works for 32-bit ARM (i.e. it does not work for
* ARM64). The code in arm/filter_neon_intrinsics.c supports ARM64, however it
* only works if -mfpu=neon is specified on the GCC command line.
*/
#ifndef __ARM_NEON__ /* else use arm/filter_neon_intrinsics.c */
#ifdef PNG_READ_SUPPORTED #ifdef PNG_READ_SUPPORTED
#if PNG_ARM_NEON_OPT > 0 #if PNG_ARM_NEON_OPT > 0
@@ -235,3 +241,4 @@ func png_read_filter_row_paeth3_neon, export=1
endfunc endfunc
#endif /* PNG_ARM_NEON_OPT > 0 */ #endif /* PNG_ARM_NEON_OPT > 0 */
#endif /* PNG_READ_SUPPORTED */ #endif /* PNG_READ_SUPPORTED */
#endif /* !__ARM_NEON__ */

View File

@@ -14,11 +14,30 @@
#include "pngpriv.h" #include "pngpriv.h"
#ifdef PNG_READ_SUPPORTED /* This code requires -mfpu=neon on the command line: */
#if PNG_ARM_NEON_OPT > 0 #ifdef __ARM_NEON__ /* else use arm/filter_neon.S */
#include <arm_neon.h> #include <arm_neon.h>
/* libpng row pointers are not necessarily aligned to any particular boundary,
* however this code will only work with appropriate alignment. arm/arm_init.c
* checks for this (and will not compile unless it is done), this code uses
* variants of png_aligncast to avoid compiler warnings.
*/
#define png_ptr(type,pointer) png_aligncast(type *,pointer)
#define png_ptrc(type,pointer) png_aligncastconst(const type *,pointer)
/* The following relies on a variable 'temp_pointer' being declared with type
* 'type'. This is written this way just to hide the GCC strict aliasing
* warning; note that the code is safe because there never is an alias between
* the input and output pointers.
*/
#define png_ldr(type,pointer)\
(temp_pointer = png_ptr(type,pointer), *temp_pointer)
#ifdef PNG_READ_SUPPORTED
#if PNG_ARM_NEON_OPT > 0
void void
png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row, png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row,
png_const_bytep prev_row) png_const_bytep prev_row)
@@ -27,10 +46,10 @@ png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row,
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
png_const_bytep pp = prev_row; png_const_bytep pp = prev_row;
uint8x16_t qrp, qpp;
for (; rp != rp_stop; rp += 16, pp += 16) for (; rp != rp_stop; rp += 16, pp += 16)
{ {
uint8x16_t qrp, qpp;
qrp = vld1q_u8(rp); qrp = vld1q_u8(rp);
qpp = vld1q_u8(pp); qpp = vld1q_u8(pp);
qrp = vaddq_u8(qrp, qpp); qrp = vaddq_u8(qrp, qpp);
@@ -38,24 +57,25 @@ png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row,
} }
} }
void inline void
png_read_filter_row_sub3_neon(png_row_infop row_info, png_bytep row, png_read_filter_row_sub3_neon(png_row_infop row_info, png_bytep row,
png_const_bytep prev_row) png_const_bytep prev_row)
{ {
png_bytep rp = row; png_bytep rp = row;
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
PNG_UNUSED(prev_row) uint8x16_t vtmp = vld1q_u8(rp);
uint8x8x2_t *vrpt = png_ptr(uint8x8x2_t, &vtmp);
uint8x8x2_t vrp = *vrpt;
uint8x8_t vtmp1, vtmp2;
uint8x8x4_t vdest; uint8x8x4_t vdest;
vdest.val[3] = vdup_n_u8(0); vdest.val[3] = vdup_n_u8(0);
uint8x16_t vtmp = vld1q_u8(rp);
uint8x8x2_t vrp = *((uint8x8x2_t *)(&vtmp));
for (; rp != rp_stop;) for (; rp != rp_stop;)
{ {
uint8x8_t vtmp1, vtmp2;
uint32x2_t *temp_pointer;
vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3); vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]); vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]);
vtmp2 = vext_u8(vrp.val[0], vrp.val[1], 6); vtmp2 = vext_u8(vrp.val[0], vrp.val[1], 6);
@@ -66,17 +86,20 @@ png_read_filter_row_sub3_neon(png_row_infop row_info, png_bytep row,
vdest.val[3] = vadd_u8(vdest.val[2], vtmp1); vdest.val[3] = vadd_u8(vdest.val[2], vtmp1);
vtmp = vld1q_u8(rp + 12); vtmp = vld1q_u8(rp + 12);
vrp = *((uint8x8x2_t *)(&vtmp)); vrpt = png_ptr(uint8x8x2_t, &vtmp);
vrp = *vrpt;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[0])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[1])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[1]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[2])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[2]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[3])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[3]), 0);
rp += 3; rp += 3;
} }
PNG_UNUSED(prev_row)
} }
void void
@@ -86,22 +109,24 @@ png_read_filter_row_sub4_neon(png_row_infop row_info, png_bytep row,
png_bytep rp = row; png_bytep rp = row;
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
PNG_UNUSED(prev_row)
uint8x8x4_t vdest; uint8x8x4_t vdest;
vdest.val[3] = vdup_n_u8(0); vdest.val[3] = vdup_n_u8(0);
for (; rp != rp_stop; rp += 16) for (; rp != rp_stop; rp += 16)
{ {
uint32x2x4_t vtmp = vld4_u32((uint32_t *)rp); uint32x2x4_t vtmp = vld4_u32(png_ptr(uint32_t,rp));
uint8x8x4_t vrp = *((uint8x8x4_t *)(&vtmp)); uint8x8x4_t *vrpt = png_ptr(uint8x8x4_t,&vtmp);
uint8x8x4_t vrp = *vrpt;
uint32x2x4_t *temp_pointer;
vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]); vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]);
vdest.val[1] = vadd_u8(vdest.val[0], vrp.val[1]); vdest.val[1] = vadd_u8(vdest.val[0], vrp.val[1]);
vdest.val[2] = vadd_u8(vdest.val[1], vrp.val[2]); vdest.val[2] = vadd_u8(vdest.val[1], vrp.val[2]);
vdest.val[3] = vadd_u8(vdest.val[2], vrp.val[3]); vdest.val[3] = vadd_u8(vdest.val[2], vrp.val[3]);
vst4_lane_u32((uint32_t *)rp, *((uint32x2x4_t *)(&vdest)), 0); vst4_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2x4_t,&vdest), 0);
} }
PNG_UNUSED(prev_row)
} }
void void
@@ -113,18 +138,27 @@ png_read_filter_row_avg3_neon(png_row_infop row_info, png_bytep row,
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
uint8x16_t vtmp; uint8x16_t vtmp;
uint8x8x2_t vrp, vpp; uint8x8x2_t *vrpt;
uint8x8_t vtmp1, vtmp2, vtmp3; uint8x8x2_t vrp;
uint8x8x4_t vdest; uint8x8x4_t vdest;
vdest.val[3] = vdup_n_u8(0); vdest.val[3] = vdup_n_u8(0);
vtmp = vld1q_u8(rp); vtmp = vld1q_u8(rp);
vrp = *((uint8x8x2_t *)(&vtmp)); vrpt = png_ptr(uint8x8x2_t,&vtmp);
vrp = *vrpt;
for (; rp != rp_stop; pp += 12) for (; rp != rp_stop; pp += 12)
{ {
uint8x8_t vtmp1, vtmp2, vtmp3;
uint8x8x2_t *vppt;
uint8x8x2_t vpp;
uint32x2_t *temp_pointer;
vtmp = vld1q_u8(pp); vtmp = vld1q_u8(pp);
vpp = *((uint8x8x2_t *)(&vtmp)); vppt = png_ptr(uint8x8x2_t,&vtmp);
vpp = *vppt;
vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3); vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]); vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]);
@@ -139,7 +173,8 @@ png_read_filter_row_avg3_neon(png_row_infop row_info, png_bytep row,
vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1); vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);
vtmp = vld1q_u8(rp + 12); vtmp = vld1q_u8(rp + 12);
vrp = *((uint8x8x2_t *)(&vtmp)); vrpt = png_ptr(uint8x8x2_t,&vtmp);
vrp = *vrpt;
vdest.val[2] = vhadd_u8(vdest.val[1], vtmp2); vdest.val[2] = vhadd_u8(vdest.val[1], vtmp2);
vdest.val[2] = vadd_u8(vdest.val[2], vtmp3); vdest.val[2] = vadd_u8(vdest.val[2], vtmp3);
@@ -149,13 +184,13 @@ png_read_filter_row_avg3_neon(png_row_infop row_info, png_bytep row,
vdest.val[3] = vhadd_u8(vdest.val[2], vtmp2); vdest.val[3] = vhadd_u8(vdest.val[2], vtmp2);
vdest.val[3] = vadd_u8(vdest.val[3], vtmp1); vdest.val[3] = vadd_u8(vdest.val[3], vtmp1);
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[0])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[1])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[1]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[2])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[2]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[3])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[3]), 0);
rp += 3; rp += 3;
} }
} }
@@ -168,17 +203,22 @@ png_read_filter_row_avg4_neon(png_row_infop row_info, png_bytep row,
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
png_const_bytep pp = prev_row; png_const_bytep pp = prev_row;
uint32x2x4_t vtmp;
uint8x8x4_t vrp, vpp;
uint8x8x4_t vdest; uint8x8x4_t vdest;
vdest.val[3] = vdup_n_u8(0); vdest.val[3] = vdup_n_u8(0);
for (; rp != rp_stop; rp += 16, pp += 16) for (; rp != rp_stop; rp += 16, pp += 16)
{ {
vtmp = vld4_u32((uint32_t *)rp); uint32x2x4_t vtmp;
vrp = *((uint8x8x4_t *)(&vtmp)); uint8x8x4_t *vrpt, *vppt;
vtmp = vld4_u32((uint32_t *)pp); uint8x8x4_t vrp, vpp;
vpp = *((uint8x8x4_t *)(&vtmp)); uint32x2x4_t *temp_pointer;
vtmp = vld4_u32(png_ptr(uint32_t,rp));
vrpt = png_ptr(uint8x8x4_t,&vtmp);
vrp = *vrpt;
vtmp = vld4_u32(png_ptrc(uint32_t,pp));
vppt = png_ptr(uint8x8x4_t,&vtmp);
vpp = *vppt;
vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]); vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]);
vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]); vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
@@ -189,11 +229,11 @@ png_read_filter_row_avg4_neon(png_row_infop row_info, png_bytep row,
vdest.val[3] = vhadd_u8(vdest.val[2], vpp.val[3]); vdest.val[3] = vhadd_u8(vdest.val[2], vpp.val[3]);
vdest.val[3] = vadd_u8(vdest.val[3], vrp.val[3]); vdest.val[3] = vadd_u8(vdest.val[3], vrp.val[3]);
vst4_lane_u32((uint32_t *)rp, *((uint32x2x4_t *)(&vdest)), 0); vst4_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2x4_t,&vdest), 0);
} }
} }
uint8x8_t static uint8x8_t
paeth(uint8x8_t a, uint8x8_t b, uint8x8_t c) paeth(uint8x8_t a, uint8x8_t b, uint8x8_t c)
{ {
uint8x8_t d, e; uint8x8_t d, e;
@@ -229,19 +269,26 @@ png_read_filter_row_paeth3_neon(png_row_infop row_info, png_bytep row,
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
uint8x16_t vtmp; uint8x16_t vtmp;
uint8x8x2_t vrp, vpp; uint8x8x2_t *vrpt;
uint8x8_t vtmp1, vtmp2, vtmp3; uint8x8x2_t vrp;
uint8x8_t vlast = vdup_n_u8(0); uint8x8_t vlast = vdup_n_u8(0);
uint8x8x4_t vdest; uint8x8x4_t vdest;
vdest.val[3] = vdup_n_u8(0); vdest.val[3] = vdup_n_u8(0);
vtmp = vld1q_u8(rp); vtmp = vld1q_u8(rp);
vrp = *((uint8x8x2_t *)(&vtmp)); vrpt = png_ptr(uint8x8x2_t,&vtmp);
vrp = *vrpt;
for (; rp != rp_stop; pp += 12) for (; rp != rp_stop; pp += 12)
{ {
uint8x8x2_t *vppt;
uint8x8x2_t vpp;
uint8x8_t vtmp1, vtmp2, vtmp3;
uint32x2_t *temp_pointer;
vtmp = vld1q_u8(pp); vtmp = vld1q_u8(pp);
vpp = *((uint8x8x2_t *)(&vtmp)); vppt = png_ptr(uint8x8x2_t,&vtmp);
vpp = *vppt;
vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast); vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast);
vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]); vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
@@ -260,20 +307,21 @@ png_read_filter_row_paeth3_neon(png_row_infop row_info, png_bytep row,
vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1); vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);
vtmp = vld1q_u8(rp + 12); vtmp = vld1q_u8(rp + 12);
vrp = *((uint8x8x2_t *)(&vtmp)); vrpt = png_ptr(uint8x8x2_t,&vtmp);
vrp = *vrpt;
vdest.val[3] = paeth(vdest.val[2], vtmp2, vtmp3); vdest.val[3] = paeth(vdest.val[2], vtmp2, vtmp3);
vdest.val[3] = vadd_u8(vdest.val[3], vtmp1); vdest.val[3] = vadd_u8(vdest.val[3], vtmp1);
vlast = vtmp2; vlast = vtmp2;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[0])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[1])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[1]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[2])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[2]), 0);
rp += 3; rp += 3;
vst1_lane_u32((uint32_t *)rp, *((uint32x2_t *)(&vdest.val[3])), 0); vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[3]), 0);
rp += 3; rp += 3;
} }
} }
@@ -286,18 +334,23 @@ png_read_filter_row_paeth4_neon(png_row_infop row_info, png_bytep row,
png_bytep rp_stop = row + row_info->rowbytes; png_bytep rp_stop = row + row_info->rowbytes;
png_const_bytep pp = prev_row; png_const_bytep pp = prev_row;
uint32x2x4_t vtmp;
uint8x8x4_t vrp, vpp;
uint8x8_t vlast = vdup_n_u8(0); uint8x8_t vlast = vdup_n_u8(0);
uint8x8x4_t vdest; uint8x8x4_t vdest;
vdest.val[3] = vdup_n_u8(0); vdest.val[3] = vdup_n_u8(0);
for (; rp != rp_stop; rp += 16, pp += 16) for (; rp != rp_stop; rp += 16, pp += 16)
{ {
vtmp = vld4_u32((uint32_t *)rp); uint32x2x4_t vtmp;
vrp = *((uint8x8x4_t *)(&vtmp)); uint8x8x4_t *vrpt, *vppt;
vtmp = vld4_u32((uint32_t *)pp); uint8x8x4_t vrp, vpp;
vpp = *((uint8x8x4_t *)(&vtmp)); uint32x2x4_t *temp_pointer;
vtmp = vld4_u32(png_ptr(uint32_t,rp));
vrpt = png_ptr(uint8x8x4_t,&vtmp);
vrp = *vrpt;
vtmp = vld4_u32(png_ptrc(uint32_t,pp));
vppt = png_ptr(uint8x8x4_t,&vtmp);
vpp = *vppt;
vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast); vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast);
vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]); vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
@@ -310,9 +363,10 @@ png_read_filter_row_paeth4_neon(png_row_infop row_info, png_bytep row,
vlast = vpp.val[3]; vlast = vpp.val[3];
vst4_lane_u32((uint32_t *)rp, *((uint32x2x4_t *)(&vdest)), 0); vst4_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2x4_t,&vdest), 0);
} }
} }
#endif /* PNG_ARM_NEON_OPT > 0 */ #endif /* PNG_ARM_NEON_OPT > 0 */
#endif /* PNG_READ_SUPPORTED */ #endif /* PNG_READ_SUPPORTED */
#endif /* __ARM_NEON__ */

View File

@@ -248,13 +248,13 @@ AC_ARG_ENABLE([arm-neon],
[Disable ARM Neon optimizations]) [Disable ARM Neon optimizations])
# Prevent inclusion of the assembler files below: # Prevent inclusion of the assembler files below:
enable_arm_neon=no;; enable_arm_neon=no;;
check|check-intrinsics) check)
AC_DEFINE([PNG_ARM_NEON_CHECK_SUPPORTED], [], AC_DEFINE([PNG_ARM_NEON_CHECK_SUPPORTED], [],
[Check for ARM Neon support at run-time]);; [Check for ARM Neon support at run-time]);;
api|api-intrinsics) api)
AC_DEFINE([PNG_ARM_NEON_API_SUPPORTED], [], AC_DEFINE([PNG_ARM_NEON_API_SUPPORTED], [],
[Turn on ARM Neon optimizations at run-time]);; [Turn on ARM Neon optimizations at run-time]);;
yes|on|yes-intrinsics|on-intrinsics) yes|on)
AC_DEFINE([PNG_ARM_NEON_OPT], [2], AC_DEFINE([PNG_ARM_NEON_OPT], [2],
[Enable ARM Neon optimizations]) [Enable ARM Neon optimizations])
AC_MSG_WARN([--enable-arm-neon: please specify 'check' or 'api', if] AC_MSG_WARN([--enable-arm-neon: please specify 'check' or 'api', if]
@@ -275,16 +275,6 @@ AM_CONDITIONAL([PNG_ARM_NEON],
*) test "$enable_arm_neon" != '';; *) test "$enable_arm_neon" != '';;
esac]) esac])
case "$enable_arm_neon" in
*-intrinsics)
use_arm_intrinsics='yes';;
*)
use_arm_intrinsics='no';;
esac
AM_CONDITIONAL([PNG_ARM_NEON_INTRINSICS],
[test "$use_arm_intrinsics" = 'yes'])
AC_MSG_NOTICE([[Extra options for compiler: $PNG_COPTS]]) AC_MSG_NOTICE([[Extra options for compiler: $PNG_COPTS]])
# Config files, substituting as above # Config files, substituting as above