endif()
check_cxx_compiler_flag("-mfpu=neon" CXX_HAS_MFPU_NEON)
check_c_compiler_flag("-mfpu=neon" C_HAS_MFPU_NEON)
- if(${CXX_HAS_MFPU_NEON} AND ${C_HAS_MFPU_NEON})
+ if(${CXX_HAS_MFPU_NEON} AND ${C_HAS_MFPU_NEON} AND NOT "${CMAKE_CXX_FLAGS} " MATCHES "-mfpu=neon[^ ]*")
get_target_property(old_flags "carotene_objs" COMPILE_FLAGS)
if(old_flags)
set_target_properties("carotene_objs" PROPERTIES COMPILE_FLAGS "${old_flags} -mfpu=neon")
srcStride == dst2Stride && \
srcStride == dst3Stride &&
-#if __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
#define SPLIT_ASM2(sgn, bits) __asm__ ( \
"vld2." #bits " {d0, d2}, [%[in0]] \n\t" \
FILL_LINES##n(VST1Q, sgn##bits) \
}
-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#endif
#define SPLIT(sgn,bits,n) void split##n(const Size2D &_size, \
const sgn##bits * srcBase, ptrdiff_t srcStride \
} \
}
-#if __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
#define ALPHA_QUAD(sgn, bits) { \
internal::prefetch(src + sj); \
vst1q_##sgn##bits(dst1 + d1j, vals.v4.val[3]); \
}
-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#endif
#define SPLIT4ALPHA(sgn,bits) void split4(const Size2D &_size, \
const sgn##bits * srcBase, ptrdiff_t srcStride, \
dstStride == src2Stride && \
dstStride == src3Stride &&
-#if __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
#define MERGE_ASM2(sgn, bits) __asm__ ( \
"vld1." #bits " {d0-d1}, [%[in0]] \n\t" \
vst##n##q_##sgn##bits(dst + dj, v_dst); \
}
-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#endif
#define COMBINE(sgn,bits,n) void combine##n(const Size2D &_size \
FILL_LINES##n(FARG, sgn##bits), \
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
for (; dj < roiw8; sj += 24, dj += 8)
{
internal::prefetch(src + sj);
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
for (; dj < roiw8; sj += 32, dj += 8)
{
internal::prefetch(src + sj);
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
for (; dj < roiw8; sj += 24, dj += 8)
{
internal::prefetch(src + sj);
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
for (; dj < roiw8; sj += 32, dj += 8)
{
internal::prefetch(src + sj);
for (; sj < roiw16; sj += 16, dj += 48)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld1.8 {d0-d1}, [%[in0]] \n\t"
"vmov.8 q1, q0 \n\t"
if (sj < roiw8)
{
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld1.8 {d0}, [%[in]] \n\t"
"vmov.8 d1, d0 \n\t"
size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register uint8x16_t vc255 asm ("q4") = vmovq_n_u8(255);
#else
uint8x16x4_t vRgba;
for (; sj < roiw16; sj += 16, dj += 64)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld1.8 {d0-d1}, [%[in0]] \n\t"
"vmov.8 q1, q0 \n\t"
if (sj < roiw8)
{
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld1.8 {d5}, [%[in]] \n\t"
"vmov.8 d6, d5 \n\t"
"d24","d25","d26","d27","d28","d29","d30","d31" \
);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
#define YCRCB_CONSTS \
register int16x4_t vcYR asm ("d31") = vmov_n_s16(4899); \
#define COEFF_G ( 8663)
#define COEFF_B (-17705)
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
#define YUV420ALPHA3_CONST
#define YUV420ALPHA4_CONST register uint8x16_t c255 asm ("q13") = vmovq_n_u8(255);
#define YUV420ALPHA3_CONVERT
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d0, d2)
#else
uint8x8x3_t vRgb = vld3_u8(src + sj);
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d0, d2)
#else
uint8x8x4_t vRgb = vld4_u8(src + sj);
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d2, d0)
#else
uint8x8x3_t vRgb = vld3_u8(src + sj);
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d2, d0)
#else
uint8x8x4_t vRgb = vld4_u8(src + sj);
for (; j < roiw16; sj += 64, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld4.8 {d2, d4, d6, d8}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
"vld4.8 {d3, d5, d7, d9}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
for (; j < roiw16; sj += 48, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld3.8 {d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
"vld3.8 {d3, d5, d7}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
for (; j < roiw16; sj += 64, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld4.8 {d0, d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 \n\t"
"vld4.8 {d1, d3, d5, d7}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB aaaaAAAA \n\t"
for (; j < roiw16; sj += 48, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld3.8 {d0, d2, d4}, [%[in0]] @ q0 q1 q2 q3 \n\t"
"vld3.8 {d1, d3, d5}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTTOYCRCB(vld3.8 {d0-d2}, d0, d1, d2)
#else
uint8x8x3_t vRgb = vld3_u8(src + sj);
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTTOYCRCB(vld4.8 {d0-d3}, d0, d1, d2)
#else
uint8x8x4_t vRgba = vld4_u8(src + sj);
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTTOYCRCB(vld3.8 {d0-d2}, d2, d1, d0)
#else
uint8x8x3_t vBgr = vld3_u8(src + sj);
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTTOYCRCB(vld4.8 {d0-d3}, d2, d1, d0)
#else
uint8x8x4_t vBgra = vld4_u8(src + sj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(3, d1, d0, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(4, d1, d0, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(3, d0, d1, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(4, d0, d1, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(3, d1, d0, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(4, d1, d0, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(3, d0, d1, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CONVERTYUV420TORGB(4, d0, d1, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
}
})
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(u8, u16, 16,
register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
{
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(u8, s32, 16,
register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);
register uint8x16_t zero1 asm ("q2") = vmovq_n_u8(0);
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(u8, f32, 16,
,
{
}
})
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(s8, u16, 16,
register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s8, s16, 16,
,
{
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(s8, s32, 16,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s8, f32, 16,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(u16, u8, 16,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(u16, s8, 16,
register uint8x16_t v127 asm ("q4") = vmovq_n_u8(127);,
{
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(u16, s16, 8,
register uint16x8_t v32767 asm ("q4") = vmovq_n_u16(0x7FFF);,
{
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(u16, s32, 8,
register uint16x8_t zero0 asm ("q1") = vmovq_n_u16(0);,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(u16, f32, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s16, u8, 16,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s16, s8, 16,
,
{
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVT_FUNC(s16, u16, 8,
register int16x8_t vZero asm ("q4") = vmovq_n_s16(0);,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s16, s32, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s16, f32, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s32, u8, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s32, s8, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s32, u16, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s32, s16, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(s32, f32, 8,
,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(f32, u8, 8,
register float32x4_t vmult asm ("q0") = vdupq_n_f32((float)(1 << 16));
register uint32x4_t vmask asm ("q1") = vdupq_n_u32(1<<16);,
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(f32, s8, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(f32, u16, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(f32, s16, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
})
#endif
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
CVT_FUNC(f32, s32, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u8, s32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u8, f32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s8, s32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s8, f32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u16, u8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u16, s8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC1(u16, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u16, s16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u16, s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(u16, f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s16, u8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s16, s8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s16, u16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC1(s16, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s16, s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s16, f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s32, u8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s32, s8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s32, u16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s32, s16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC1(s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(s32, f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(f32, u8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)((1 << 16)*alpha));
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)((1 << 16)*beta));
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(f32, s8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(f32, u16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(f32, s16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC(f32, s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
})
#endif
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
CVTS_FUNC1(f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
u16* lidx1 = lane + x - 1*2;
u16* lidx3 = lane + x + 1*2;
u16* lidx4 = lane + x + 2*2;
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ __volatile__ (
"vld2.16 {d0, d2}, [%[in0]]! \n\t"
"vld2.16 {d1, d3}, [%[in0]] \n\t"
internal::prefetch(dsrc + dstep * 2, 0);
for(x = 0; x <= wwcn - 4; x += 4, dsrc += 4*2, dIptr += 4*2 )
{
-#if __GNUC_MINOR__ < 0
+#if 0
__asm__ (
"vld2.16 {d0-d1}, [%[dsrc00]] \n\t"
"vld2.16 {d2-d3}, [%[dsrc10]] \n\t"
for (; x < roiw8; x += 8)
{
internal::prefetch(lane + 2 * x);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld2.16 {d0-d3}, [%[in0]] \n\t"
"vld2.16 {d4-d7}, [%[in4]] \n\t"
for (; x < roiw4; x += 4)
{
internal::prefetch(lane + 2 * x);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld2.32 {d0-d3}, [%[in0]] \n\t"
"vld2.32 {d4-d7}, [%[in4]] \n\t"
std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
f32* lane = internal::alignPtr(&_buf[2*cn], 32);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4
for (; x < roiw4; x += 4)
{
internal::prefetch(lane + 2 * x);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ __volatile__ (
"vld2.32 {d0-d3}, [%[in0]] \n\t"
"vld2.32 {d8-d11}, [%[in4]] \n\t"
internal::prefetch(srow0 + x);
internal::prefetch(srow1 + x);
internal::prefetch(srow2 + x);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
__asm__ (
"vld1.8 {d0}, [%[src0]] \n\t"
"vld1.8 {d2}, [%[src2]] \n\t"
x = 0;
for( ; x < roiw8; x += 8 )
{
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
__asm__ (
"vld1.16 {d4-d5}, [%[s2ptr]] \n\t"
"vld1.16 {d8-d9}, [%[s4ptr]] \n\t"
vr.val[1] = vmlaq_s16(s3x10, s24, vc3);
vst2q_s16(drow + x*2, vr);
-#endif //__GNUC_MINOR__ < 6
+#endif
}
for( ; x < colsn; x++ )
{
file(GLOB lib_hdrs *.h)
-if(ENABLE_NEON)
- list(APPEND lib_srcs arm/arm_init.c arm/filter_neon.S arm/filter_neon_intrinsics.c)
- add_definitions(-DPNG_ARM_NEON_OPT=2)
-elseif(AARCH64)
- add_definitions(-DPNG_ARM_NEON_OPT=0) # NEON assembler is not supported
+if(ARM OR AARCH64)
+ if(ENABLE_NEON AND NOT AARCH64)
+ list(APPEND lib_srcs arm/arm_init.c arm/filter_neon.S arm/filter_neon_intrinsics.c)
+ add_definitions(-DPNG_ARM_NEON_OPT=2)
+ else()
+ add_definitions(-DPNG_ARM_NEON_OPT=0) # NEON assembler is not supported
+ endif()
endif()
if(ENABLE_SSE
project(${WEBP_LIBRARY})
ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-ocv_include_directories(${CPUFEATURES_INCLUDE_DIRS})
+if(ANDROID)
+ ocv_include_directories(${CPUFEATURES_INCLUDE_DIRS})
+endif()
file(GLOB lib_srcs dec/*.c demux/*.c dsp/*.c enc/*.c mux/*.c utils/*.c webp/*.c)
file(GLOB lib_hdrs dec/*.h demux/*.h dsp/*.h enc/*.h mux/*.h utils/*.h webp/*.h)
add_definitions(-DWEBP_USE_THREAD)
add_library(${WEBP_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
-target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES})
+if(ANDROID)
+ target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES})
+endif()
if(UNIX)
if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O (iOS/Mac)" ON IF APPLE)
OCV_OPTION(WITH_CARBON "Use Carbon for UI instead of Cocoa" OFF IF APPLE )
OCV_OPTION(WITH_CAROTENE "Use NVidia carotene acceleration library for ARM platform" ON IF (ARM OR AARCH64) AND NOT IOS AND NOT (CMAKE_VERSION VERSION_LESS "2.8.11"))
+OCV_OPTION(WITH_CPUFEATURES "Use cpufeatures Android library" ON IF ANDROID)
OCV_OPTION(WITH_VTK "Include VTK library support (and build opencv_viz module eiher)" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT AND NOT CMAKE_CROSSCOMPILING) )
OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(ENABLE_OMIT_FRAME_POINTER "Enable -fomit-frame-pointer for GCC" ON IF CMAKE_COMPILER_IS_GNUCXX AND NOT (APPLE AND CMAKE_COMPILER_IS_CLANGCXX) )
OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" ON IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES powerpc.*) )
OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
-OCV_OPTION(ENABLE_NEON "Enable NEON instructions" "${NEON}" IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
+OCV_OPTION(ENABLE_NEON "Enable NEON instructions" (NEON OR ANDROID_ARM_NEON OR AARCH64) IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
# Detect 3rd-party libraries
# ----------------------------------------------------------------------------
-if(ANDROID)
- add_subdirectory(3rdparty/cpufeatures)
+if(ANDROID AND WITH_CPUFEATURES)
+ add_subdirectory(3rdparty/cpufeatures)
+ set(HAVE_CPUFEATURES 1)
endif()
include(cmake/OpenCVFindLibsGrfmt.cmake)
macro(ocv_append_optimization_flag var OPT)
if(CPU_${OPT}_FLAGS_CONFLICT)
- string(REGEX REPLACE " ${CPU_${OPT}_FLAGS_CONFLICT}" "" ${var} " ${${var}}")
+ string(REGEX REPLACE " ${CPU_${OPT}_FLAGS_CONFLICT}" "" ${var} " ${${var}} ")
string(REGEX REPLACE "^ +" "" ${var} "${${var}}")
endif()
set(${var} "${${var}} ${CPU_${OPT}_FLAGS_ON}")
endif()
elseif(ARM OR AARCH64)
+ ocv_update(CPU_NEON_TEST_FILE "${OpenCV_SOURCE_DIR}/cmake/checks/cpu_neon.cpp")
ocv_update(CPU_FP16_TEST_FILE "${OpenCV_SOURCE_DIR}/cmake/checks/cpu_fp16.cpp")
if(NOT AARCH64)
ocv_update(CPU_KNOWN_OPTIMIZATIONS "VFPV3;NEON;FP16")
- ocv_update(CPU_NEON_FLAGS_ON "-mfpu=neon")
ocv_update(CPU_VFPV3_FLAGS_ON "-mfpu=vfpv3")
+ ocv_update(CPU_NEON_FLAGS_ON "-mfpu=neon")
+ ocv_update(CPU_NEON_FLAGS_CONFLICT "-mfpu=[^ ]*")
ocv_update(CPU_FP16_FLAGS_ON "-mfpu=neon-fp16")
- set(CPU_BASELINE "DETECT" CACHE STRING "${HELP_CPU_BASELINE}")
+ ocv_update(CPU_FP16_IMPLIES "NEON")
+ ocv_update(CPU_FP16_FLAGS_CONFLICT "-mfpu=[^ ]*")
else()
ocv_update(CPU_KNOWN_OPTIMIZATIONS "NEON;FP16")
ocv_update(CPU_NEON_FLAGS_ON "")
- set(CPU_BASELINE "NEON" CACHE STRING "${HELP_CPU_BASELINE}")
+ ocv_update(CPU_FP16_IMPLIES "NEON")
+ set(CPU_BASELINE "NEON;FP16" CACHE STRING "${HELP_CPU_BASELINE}")
endif()
endif()
set_property(CACHE CPU_BASELINE PROPERTY STRINGS "" ${CPU_KNOWN_OPTIMIZATIONS})
set_property(CACHE CPU_DISPATCH PROPERTY STRINGS "" ${CPU_KNOWN_OPTIMIZATIONS})
+# Update CPU_BASELINE_DETECT flag
+if(";${CPU_BASELINE};" MATCHES ";DETECT;")
+ set(CPU_BASELINE_DETECT ON)
+endif()
+
set(CPU_BASELINE_FLAGS "")
set(CPU_BASELINE_FINAL "")
if(CPU_${OPT}_SUPPORTED)
if(";${CPU_DISPATCH};" MATCHES ";${OPT};" AND NOT __is_from_baseline)
list(APPEND CPU_DISPATCH_FINAL ${OPT})
- elseif(__is_from_baseline AND NOT CPU_BASELINE_DETECT)
+ elseif(__is_from_baseline)
list(APPEND CPU_BASELINE_FINAL ${OPT})
ocv_append_optimization_flag(CPU_BASELINE_FLAGS ${OPT})
endif()
if(ARM)
add_extra_compiler_option("-mfp16-format=ieee")
endif(ARM)
- if(ENABLE_NEON)
- add_extra_compiler_option("-mfpu=neon")
- endif()
- if(ENABLE_VFPV3 AND NOT ENABLE_NEON)
- add_extra_compiler_option("-mfpu=vfpv3")
- endif()
endmacro()
macro(ocv_compiler_optimization_options_finalize)
ocv_clear_vars(WEBP_FOUND WEBP_LIBRARY WEBP_LIBRARIES WEBP_INCLUDE_DIR)
else()
include(cmake/OpenCVFindWebP.cmake)
+ if(WEBP_FOUND)
+ set(HAVE_WEBP 1)
+ endif()
endif()
endif()
# --- Add libwebp to 3rdparty/libwebp and compile it if not available ---
-if(WITH_WEBP AND NOT WEBP_FOUND)
+if(WITH_WEBP AND NOT WEBP_FOUND
+ AND (NOT ANDROID OR HAVE_CPUFEATURES)
+)
set(WEBP_LIBRARY libwebp)
set(WEBP_LIBRARIES ${WEBP_LIBRARY})
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libwebp")
set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}")
+ set(HAVE_WEBP 1)
endif()
if(NOT WEBP_VERSION AND WEBP_INCLUDE_DIR)
endif(NOT ENABLE_NOISY_WARNINGS)
endmacro()
+macro(ocv_append_sourge_file_compile_definitions source)
+ get_source_file_property(_value "${source}" COMPILE_DEFINITIONS)
+ if(_value)
+ set(_value ${_value} ${ARGN})
+ else()
+ set(_value ${ARGN})
+ endif()
+ set_source_files_properties("${source}" PROPERTIES COMPILE_DEFINITIONS "${_value}")
+endmacro()
+
macro(add_apple_compiler_options the_module)
ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS "")
if(HAVE_OBJC_EXCEPTIONS)
--- /dev/null
+#include <stdio.h>
+
+#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
+# include <Intrin.h>
+# include <arm_neon.h>
+# define CV_NEON 1
+#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
+# include <arm_neon.h>
+# define CV_NEON 1
+#endif
+
+#if defined CV_NEON
+int test()
+{
+ const float src[] = { 0.0f, 0.0f, 0.0f, 0.0f };
+ float32x4_t val = vld1q_f32((const float32_t*)(src));
+ return (int)vgetq_lane_f32(val, 0);
+}
+#else
+#error "NEON is not supported"
+#endif
+
+int main()
+{
+ printf("%d\n", test());
+ return 0;
+}
ocv_glob_module_sources(SOURCES "${OPENCV_MODULE_opencv_core_BINARY_DIR}/version_string.inc"
HEADERS ${lib_cuda_hdrs} ${lib_cuda_hdrs_detail})
-ocv_module_include_directories(${the_module} ${ZLIB_INCLUDE_DIRS} ${OPENCL_INCLUDE_DIRS} ${CPUFEATURES_INCLUDE_DIRS})
+ocv_module_include_directories(${the_module} ${ZLIB_INCLUDE_DIRS} ${OPENCL_INCLUDE_DIRS})
+if(ANDROID AND HAVE_CPUFEATURES)
+ ocv_append_sourge_file_compile_definitions(${CMAKE_CURRENT_SOURCE_DIR}/src/system.cpp "HAVE_CPUFEATURES=1")
+ ocv_module_include_directories(${CPUFEATURES_INCLUDE_DIRS})
+endif()
ocv_create_module(${extra_libs})
ocv_target_link_libraries(${the_module} ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}" "${VA_LIBRARIES}" "${LAPACK_LIBRARIES}" "${CPUFEATURES_LIBRARIES}")
# define CV_AVX 1
#endif
#ifdef CV_CPU_COMPILE_FP16
-# include <immintrin.h>
+# if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM)
+# include <arm_neon.h>
+# else
+# include <immintrin.h>
+# endif
# define CV_FP16 1
#endif
#ifdef CV_CPU_COMPILE_AVX2
#endif
#if CV_FP16
-// Workaround for old comiplers
+// Workaround for old compilers
template <typename T> static inline int16x4_t vreinterpret_s16_f16(T a)
{ return (int16x4_t)a; }
template <typename T> static inline float16x4_t vreinterpret_f16_s16(T a)
{ return (float16x4_t)a; }
-template <typename T> static inline float16x4_t vld1_f16(const T* ptr)
-{ return vreinterpret_f16_s16(vld1_s16((const short*)ptr)); }
-template <typename T> static inline void vst1_f16(T* ptr, float16x4_t a)
-{ vst1_s16((short*)ptr, vreinterpret_s16_f16(a)); }
+template <typename T> static inline float16x4_t cv_vld1_f16(const T* ptr)
+{
+#ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro
+ return vreinterpret_f16_s16(vld1_s16((const short*)ptr));
+#else
+ return vld1_f16((const __fp16*)ptr);
+#endif
+}
+template <typename T> static inline void cv_vst1_f16(T* ptr, float16x4_t a)
+{
+#ifndef vst1_f16 // APPLE compiler defines vst1_f16 as macro
+ vst1_s16((short*)ptr, vreinterpret_s16_f16(a));
+#else
+ vst1_f16((__fp16*)ptr, a);
+#endif
+}
struct v_float16x4
{
v_float16x4(short v0, short v1, short v2, short v3)
{
short v[] = {v0, v1, v2, v3};
- val = vld1_f16(v);
+ val = cv_vld1_f16(v);
}
short get0() const
{
#if CV_FP16
// Workaround for old comiplers
inline v_float16x4 v_load_f16(const short* ptr)
-{ return v_float16x4(vld1_f16(ptr)); }
+{ return v_float16x4(cv_vld1_f16(ptr)); }
inline void v_store_f16(short* ptr, v_float16x4& a)
-{ vst1_f16(ptr, a.val); }
+{ cv_vst1_f16(ptr, a.val); }
#endif
#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
#elif CV_NEON
const static int cVectorWidth = 4;
-template <typename T> static inline float16x4_t vld1_f16(const T* ptr)
-{ return (float16x4_t)vld1_s16((const short*)ptr); }
-template <typename T> static inline void vst1_f16(T* ptr, float16x4_t a)
-{ vst1_s16((short*)ptr, a); }
-
void cvtScaleHalf_SIMD32f16f( const float* src, size_t sstep, short* dst, size_t dstep, cv::Size size )
{
CV_INSTRUMENT_REGION()
float16x4_t v_dst = vcvt_f16_f32(v_src);
- vst1_f16((__fp16*)dst + x, v_dst);
+ cv_vst1_f16((__fp16*)dst + x, v_dst);
}
for ( ; x < size.width; x++ )
int x = 0;
for ( ; x <= size.width - cVectorWidth ; x += cVectorWidth )
{
- float16x4_t v_src = vld1_f16((__fp16*)src + x);
+ float16x4_t v_src = cv_vld1_f16((__fp16*)src + x);
float32x4_t v_dst = vcvt_f32_f16(v_src);
#endif
#if CV_FP16_TYPE
-float convertFp16SW(short fp16)
+inline float convertFp16SW(short fp16)
{
// Fp16 -> Fp32
Cv16suf a;
return (float)a.h;
}
#else
-float convertFp16SW(short fp16)
+inline float convertFp16SW(short fp16)
{
// Fp16 -> Fp32
Cv16suf b;
#endif
#if CV_FP16_TYPE
-short convertFp16SW(float fp32)
+inline short convertFp16SW(float fp32)
{
// Fp32 -> Fp16
Cv16suf a;
return a.i;
}
#else
-short convertFp16SW(float fp32)
+inline short convertFp16SW(float fp32)
{
// Fp32 -> Fp16
Cv32suf a;
#endif
#endif
-#if defined ANDROID
+#if defined ANDROID && defined HAVE_CPUFEATURES
# include <cpu-features.h>
#endif
have[CV_CPU_NEON] = true;
have[CV_CPU_FP16] = true;
#elif defined __arm__ && defined __ANDROID__
+ #if defined HAVE_CPUFEATURES
__android_log_print(ANDROID_LOG_INFO, "OpenCV", "calling android_getCpuFeatures() ...");
uint64_t features = android_getCpuFeatures();
__android_log_print(ANDROID_LOG_INFO, "OpenCV", "calling android_getCpuFeatures() ... Done (%llx)", features);
have[CV_CPU_NEON] = (features & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
have[CV_CPU_FP16] = (features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) != 0;
+ #else
+ __android_log_print(ANDROID_LOG_INFO, "OpenCV", "cpufeatures library is not available for CPU detection");
+ #if CV_NEON
+ __android_log_print(ANDROID_LOG_INFO, "OpenCV", "- NEON instructions is enabled via build flags");
+ have[CV_CPU_NEON] = true;
+ #else
+ __android_log_print(ANDROID_LOG_INFO, "OpenCV", "- NEON instructions is NOT enabled via build flags");
+ #endif
+ #if CV_FP16
+ __android_log_print(ANDROID_LOG_INFO, "OpenCV", "- FP16 instructions is enabled via build flags");
+ have[CV_CPU_FP16] = true;
+ #else
+ __android_log_print(ANDROID_LOG_INFO, "OpenCV", "- FP16 instructions is NOT enabled via build flags");
+ #endif
+ #endif
#elif defined __arm__
int cpufile = open("/proc/self/auxv", O_RDONLY);
list(APPEND GRFMT_LIBS ${JPEG_LIBRARIES})
endif()
-if(WITH_WEBP)
+if(HAVE_WEBP)
add_definitions(-DHAVE_WEBP)
ocv_include_directories(${WEBP_INCLUDE_DIR})
list(APPEND GRFMT_LIBS ${WEBP_LIBRARIES})