/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NECannyEdgeKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
using namespace arm_compute;

namespace arm_compute
{
class Coordinates;
} // namespace arm_compute
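
// Labels used by the non-maxima suppression and hysteresis steps below: pixels
// above the upper threshold become EDGE, pixels below the lower threshold become
// NO_EDGE, and pixels in between are marked MAYBE until edge tracing resolves them.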
namespace
{
constexpr int NO_EDGE = 0;
constexpr int EDGE    = 255;
constexpr int MAYBE   = 127;
} // namespace
#ifdef ARM_COMPUTE_ENABLE_FP16
namespace fp16
{
inline uint8x8_t phase_quantization(const float32x4x2_t &gx, const float32x4x2_t &gy)
{
    // Constants used for evaluating score1 and score3
    static const float32x4_t const45 = vdupq_n_f32(0.70710678118655f);
    static const float32x4_t zero    = vdupq_n_f32(0.0f);
    static const float32x4_t one     = vdupq_n_f32(1.0f);
    static const float32x4_t two     = vdupq_n_f32(2.0f);
    static const float32x4_t three   = vdupq_n_f32(3.0f);

    // Score0: (1, 0)
    const float32x4x2_t score0 =
    {
        {
            vabsq_f32(gx.val[0]),
            vabsq_f32(gx.val[1])
        }
    };

    // Score2: (0, 1)
    const float32x4x2_t score2 =
    {
        {
            vabsq_f32(gy.val[0]),
            vabsq_f32(gy.val[1])
        }
    };

    // Score1 and Score3: ( sqrt(2) / 2, sqrt(2) / 2 ) - ( -sqrt(2) / 2, sqrt(2) / 2 )
    float32x4x2_t score1 =
    {
        {
            vmulq_f32(gy.val[0], const45),
            vmulq_f32(gy.val[1], const45)
        }
    };

    float32x4x2_t score3 = score1;

    score1.val[0] = vmlaq_f32(score1.val[0], gx.val[0], const45);
    score1.val[1] = vmlaq_f32(score1.val[1], gx.val[1], const45);
    score3.val[0] = vmlsq_f32(score3.val[0], gx.val[0], const45);
    score3.val[1] = vmlsq_f32(score3.val[1], gx.val[1], const45);

    score1.val[0] = vabsq_f32(score1.val[0]);
    score1.val[1] = vabsq_f32(score1.val[1]);
    score3.val[0] = vabsq_f32(score3.val[0]);
    score3.val[1] = vabsq_f32(score3.val[1]);
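
    // The quantized phase is the direction (0°, 45°, 90°, 135°) whose absolute
    // directional response is largest; the comparisons below keep a running
    // maximum in old_score and record the winning bin index in phase.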
    float32x4x2_t phase     = { { zero, zero } };
    float32x4x2_t old_score = score0;

    // score1 > old_score?
    uint32x4x2_t mask =
    {
        {
            vcgtq_f32(score1.val[0], old_score.val[0]),
            vcgtq_f32(score1.val[1], old_score.val[1])
        }
    };

    phase.val[0]     = vbslq_f32(mask.val[0], one, phase.val[0]);
    phase.val[1]     = vbslq_f32(mask.val[1], one, phase.val[1]);
    old_score.val[0] = vbslq_f32(mask.val[0], score1.val[0], old_score.val[0]);
    old_score.val[1] = vbslq_f32(mask.val[1], score1.val[1], old_score.val[1]);

    // score2 > old_score?
    mask.val[0] = vcgtq_f32(score2.val[0], old_score.val[0]);
    mask.val[1] = vcgtq_f32(score2.val[1], old_score.val[1]);

    phase.val[0]     = vbslq_f32(mask.val[0], two, phase.val[0]);
    phase.val[1]     = vbslq_f32(mask.val[1], two, phase.val[1]);
    old_score.val[0] = vbslq_f32(mask.val[0], score2.val[0], old_score.val[0]);
    old_score.val[1] = vbslq_f32(mask.val[1], score2.val[1], old_score.val[1]);

    // score3 > old_score?
    mask.val[0] = vcgtq_f32(score3.val[0], old_score.val[0]);
    mask.val[1] = vcgtq_f32(score3.val[1], old_score.val[1]);

    phase.val[0]     = vbslq_f32(mask.val[0], three, phase.val[0]);
    phase.val[1]     = vbslq_f32(mask.val[1], three, phase.val[1]);
    old_score.val[0] = vbslq_f32(mask.val[0], score3.val[0], old_score.val[0]);
    old_score.val[1] = vbslq_f32(mask.val[1], score3.val[1], old_score.val[1]);

    // Convert from float32x4_t to uint8x8_t
    return vmovn_u16(vcombine_u16(vmovn_u32(vcvtq_u32_f32(phase.val[0])),
                                  vmovn_u32(vcvtq_u32_f32(phase.val[1]))));
}
inline uint8x8_t phase_quantization(float16x8_t gx, float16x8_t gy)
{
    // Constants used for evaluating score1 and score3
    static const float16x8_t const45 = vdupq_n_f16(0.70710678118655f);
    static const float16x8_t zero    = vdupq_n_f16(0.0f);
    static const float16x8_t one     = vdupq_n_f16(1.0f);
    static const float16x8_t two     = vdupq_n_f16(2.0f);
    static const float16x8_t three   = vdupq_n_f16(3.0f);

    // Score0: (1, 0)
    const float16x8_t score0 = vabsq_f16(gx);

    // Score2: (0, 1)
    const float16x8_t score2 = vabsq_f16(gy);

    // Score1 and Score3: ( sqrt(2) / 2, sqrt(2) / 2 ) - ( -sqrt(2) / 2, sqrt(2) / 2 )
    float16x8_t score1 = vmulq_f16(gy, const45);
    float16x8_t score3 = score1;

    score1 = vfmaq_f16(score1, gx, const45);
    score3 = vfmsq_f16(score3, gx, const45);

    score1 = vabsq_f16(score1);
    score3 = vabsq_f16(score3);

    float16x8_t phase     = zero;
    float16x8_t old_score = score0;

    // score1 > old_score?
    uint16x8_t mask = vcgtq_f16(score1, old_score);

    phase     = vbslq_f16(mask, one, phase);
    old_score = vbslq_f16(mask, score1, old_score);

    // score2 > old_score?
    mask = vcgtq_f16(score2, old_score);

    phase     = vbslq_f16(mask, two, phase);
    old_score = vbslq_f16(mask, score2, old_score);

    // score3 > old_score?
    mask = vcgtq_f16(score3, old_score);

    phase = vbslq_f16(mask, three, phase);

    // Convert from float16x8_t to uint8x8_t
    return vmovn_u16(vcvtq_u16_f16(phase));
}
/** Computes the gradient phase if gradient_size = 3 or 5. The output is quantized.
 *  0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return quantized phase for 8 pixels
 */
inline uint8x8_t phase_quantization_S16_S16(int16x8_t gx, int16x8_t gy)
{
    return phase_quantization(vcvtq_f16_s16(gx), vcvtq_f16_s16(gy));
}
/** Computes the gradient phase if gradient_size = 7. The output is quantized.
 *  0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return quantized phase for 8 pixels
 */
inline uint8x8_t phase_quantization_S32_S32(const int32x4x2_t &gx, const int32x4x2_t &gy)
{
    // Convert to float
    const float32x4x2_t gx_f32 =
    {
        {
            vcvtq_f32_s32(gx.val[0]),
            vcvtq_f32_s32(gx.val[1])
        }
    };

    const float32x4x2_t gy_f32 =
    {
        {
            vcvtq_f32_s32(gy.val[0]),
            vcvtq_f32_s32(gy.val[1])
        }
    };

    return phase_quantization(gx_f32, gy_f32);
}
/** Computes the magnitude using the L1-norm type if gradient_size = 3 or 5
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint16x8_t mag_l1_S16_S16(int16x8_t gx, int16x8_t gy)
{
    return vaddq_u16(vreinterpretq_u16_s16(vabsq_s16(gx)),
                     vreinterpretq_u16_s16(vabsq_s16(gy)));
}
/** Computes the magnitude using the L1-norm type if gradient_size = 7
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint32x4x2_t mag_l1_S32_S32(const int32x4x2_t &gx, const int32x4x2_t &gy)
{
    const uint32x4x2_t gx_abs =
    {
        {
            vreinterpretq_u32_s32(vabsq_s32(gx.val[0])),
            vreinterpretq_u32_s32(vabsq_s32(gx.val[1]))
        }
    };

    const uint32x4x2_t gy_abs =
    {
        {
            vreinterpretq_u32_s32(vabsq_s32(gy.val[0])),
            vreinterpretq_u32_s32(vabsq_s32(gy.val[1]))
        }
    };

    const uint32x4x2_t out =
    {
        {
            vaddq_u32(gx_abs.val[0], gy_abs.val[0]),
            vaddq_u32(gx_abs.val[1], gy_abs.val[1])
        }
    };

    return out;
}
inline float32x4x2_t mag_l2(const float32x4x2_t &gx, const float32x4x2_t &gy)
{
    // x^2
    float32x4x2_t mag =
    {
        {
            vmulq_f32(gx.val[0], gx.val[0]),
            vmulq_f32(gx.val[1], gx.val[1])
        }
    };

    // x^2 + y^2
    mag.val[0] = vmlaq_f32(mag.val[0], gy.val[0], gy.val[0]);
    mag.val[1] = vmlaq_f32(mag.val[1], gy.val[1], gy.val[1]);
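
    // sqrt(x^2 + y^2): vrsqrteq_f32 only returns a rough reciprocal square root
    // estimate, so mag * rsqrt(mag) is an approximate square root; no
    // Newton-Raphson refinement step (vrsqrtsq_f32) is applied here.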
    mag.val[0] = vmulq_f32(vrsqrteq_f32(mag.val[0]), mag.val[0]);
    mag.val[1] = vmulq_f32(vrsqrteq_f32(mag.val[1]), mag.val[1]);

    return mag;
}
inline float16x8_t mag_l2(float16x8_t gx, float16x8_t gy)
{
    // x^2
    float16x8_t mag = vmulq_f16(gx, gx);

    // x^2 + y^2
    mag = vfmaq_f16(mag, gy, gy);
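
    // sqrt(x^2 + y^2), again via the reciprocal square root estimate
    // (see the float32 mag_l2 above); precision is limited accordingly.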
    mag = vmulq_f16(vrsqrteq_f16(mag), mag);

    return mag;
}
/** Computes the magnitude using L2-norm if gradient_size = 3 or 5
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint16x8_t mag_l2_S16_S16(int16x8_t gx, int16x8_t gy)
{
    /* Compute magnitude using L2 normalization */
    const float16x8_t gx2 = vcvtq_f16_s16(gx);
    const float16x8_t gy2 = vcvtq_f16_s16(gy);
    const float16x8_t mag = mag_l2(gx2, gy2);

    /* Store magnitude - Convert to uint16x8 */
    return vcvtq_u16_f16(mag);
}
/** Computes the magnitude using L2-norm if gradient_size = 7
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint32x4x2_t mag_l2_S32_S32(const int32x4x2_t &gx, const int32x4x2_t &gy)
{
    // Compute magnitude using L2 normalization
    const float32x4x2_t gx2 =
    {
        {
            vcvtq_f32_s32(gx.val[0]),
            vcvtq_f32_s32(gx.val[1])
        }
    };

    const float32x4x2_t gy2 =
    {
        {
            vcvtq_f32_s32(gy.val[0]),
            vcvtq_f32_s32(gy.val[1])
        }
    };

    const float32x4x2_t mag = mag_l2(gx2, gy2);
    const uint32x4x2_t  mag32 =
    {
        {
            vcvtq_u32_f32(mag.val[0]),
            vcvtq_u32_f32(mag.val[1])
        }
    };

    return mag32;
}
/** Gradient function used when the gradient size = 3 or 5 and when the norm_type = L1-norm
 *
 * @param[in]  in1_ptr  Pointer to source image. Gx image. Data type supported S16
 * @param[in]  in2_ptr  Pointer to source image. Gy image. Data type supported S16
 * @param[out] out1_ptr Pointer to destination image. Magnitude. Data type supported U16
 * @param[out] out2_ptr Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l1norm_S16_S16_U16_U8(const void *__restrict in1_ptr, const void *__restrict in2_ptr, void *__restrict out1_ptr, void *__restrict out2_ptr)
{
    const auto in1  = static_cast<const int16_t *__restrict>(in1_ptr);
    const auto in2  = static_cast<const int16_t *__restrict>(in2_ptr);
    const auto out1 = static_cast<uint16_t *__restrict>(out1_ptr);
    const auto out2 = static_cast<uint8_t *__restrict>(out2_ptr);
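
    // Each call consumes 32 Gx/Gy values (four 8-lane vectors) and produces 32
    // magnitude and 32 phase results, matching the kernel's 32 elements per iteration.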
    const int16x8x4_t gx =
    {
        {
            vld1q_s16(in1),
            vld1q_s16(in1 + 8),
            vld1q_s16(in1 + 16),
            vld1q_s16(in1 + 24)
        }
    };

    const int16x8x4_t gy =
    {
        {
            vld1q_s16(in2),
            vld1q_s16(in2 + 8),
            vld1q_s16(in2 + 16),
            vld1q_s16(in2 + 24)
        }
    };

    // Compute and store phase
    vst1_u8(out2 + 0, phase_quantization_S16_S16(gx.val[0], gy.val[0]));
    vst1_u8(out2 + 8, phase_quantization_S16_S16(gx.val[1], gy.val[1]));
    vst1_u8(out2 + 16, phase_quantization_S16_S16(gx.val[2], gy.val[2]));
    vst1_u8(out2 + 24, phase_quantization_S16_S16(gx.val[3], gy.val[3]));

    // Compute and store magnitude using L1 normalization
    vst1q_u16(out1 + 0, mag_l1_S16_S16(gx.val[0], gy.val[0]));
    vst1q_u16(out1 + 8, mag_l1_S16_S16(gx.val[1], gy.val[1]));
    vst1q_u16(out1 + 16, mag_l1_S16_S16(gx.val[2], gy.val[2]));
    vst1q_u16(out1 + 24, mag_l1_S16_S16(gx.val[3], gy.val[3]));
}
/** Gradient function used when the gradient size = 3 or 5 and when the norm_type = L2-norm
 *
 * @param[in]  in1_ptr  Pointer to source image. Gx image. Data type supported S16
 * @param[in]  in2_ptr  Pointer to source image. Gy image. Data type supported S16
 * @param[out] out1_ptr Pointer to destination image. Magnitude. Data type supported U16
 * @param[out] out2_ptr Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l2norm_S16_S16_U16_U8(const void *__restrict in1_ptr, const void *__restrict in2_ptr, void *__restrict out1_ptr, void *__restrict out2_ptr)
{
    const auto in1  = static_cast<const int16_t *__restrict>(in1_ptr);
    const auto in2  = static_cast<const int16_t *__restrict>(in2_ptr);
    const auto out1 = static_cast<uint16_t *__restrict>(out1_ptr);
    const auto out2 = static_cast<uint8_t *__restrict>(out2_ptr);

    const int16x8x4_t gx =
    {
        {
            vld1q_s16(in1),
            vld1q_s16(in1 + 8),
            vld1q_s16(in1 + 16),
            vld1q_s16(in1 + 24)
        }
    };

    const int16x8x4_t gy =
    {
        {
            vld1q_s16(in2),
            vld1q_s16(in2 + 8),
            vld1q_s16(in2 + 16),
            vld1q_s16(in2 + 24)
        }
    };

    // Compute and store phase
    vst1_u8(out2 + 0, phase_quantization_S16_S16(gx.val[0], gy.val[0]));
    vst1_u8(out2 + 8, phase_quantization_S16_S16(gx.val[1], gy.val[1]));
    vst1_u8(out2 + 16, phase_quantization_S16_S16(gx.val[2], gy.val[2]));
    vst1_u8(out2 + 24, phase_quantization_S16_S16(gx.val[3], gy.val[3]));

    // Compute and store magnitude using L2 normalization
    vst1q_u16(out1 + 0, mag_l2_S16_S16(gx.val[0], gy.val[0]));
    vst1q_u16(out1 + 8, mag_l2_S16_S16(gx.val[1], gy.val[1]));
    vst1q_u16(out1 + 16, mag_l2_S16_S16(gx.val[2], gy.val[2]));
    vst1q_u16(out1 + 24, mag_l2_S16_S16(gx.val[3], gy.val[3]));
}
/** Gradient function used when the gradient size = 7 and when the norm_type = L1-norm
 *
 * @param[in]  in1_ptr  Pointer to source image. Gx image. Data type supported S32
 * @param[in]  in2_ptr  Pointer to source image. Gy image. Data type supported S32
 * @param[out] out1_ptr Pointer to destination image. Magnitude. Data type supported U32
 * @param[out] out2_ptr Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l1norm_S32_S32_U32_U8(const void *__restrict in1_ptr, const void *__restrict in2_ptr, void *__restrict out1_ptr, void *__restrict out2_ptr)
{
    auto in1  = static_cast<const int32_t *__restrict>(in1_ptr);
    auto in2  = static_cast<const int32_t *__restrict>(in2_ptr);
    auto out1 = static_cast<uint32_t *__restrict>(out1_ptr);
    auto out2 = static_cast<uint8_t *__restrict>(out2_ptr);

    // Process low and high part
    for(size_t i = 0; i < 2; ++i, in1 += 16, in2 += 16, out1 += 16, out2 += 16)
    {
        const int32x4x2_t gx0 =
        {
            {
                vld1q_s32(in1 + 0),
                vld1q_s32(in1 + 4)
            }
        };

        const int32x4x2_t gx1 =
        {
            {
                vld1q_s32(in1 + 8),
                vld1q_s32(in1 + 12)
            }
        };

        const int32x4x2_t gy0 =
        {
            {
                vld1q_s32(in2 + 0),
                vld1q_s32(in2 + 4)
            }
        };

        const int32x4x2_t gy1 =
        {
            {
                vld1q_s32(in2 + 8),
                vld1q_s32(in2 + 12)
            }
        };

        // Compute and store phase
        vst1_u8(out2 + 0, phase_quantization_S32_S32(gx0, gy0));
        vst1_u8(out2 + 8, phase_quantization_S32_S32(gx1, gy1));

        // Compute magnitude using L1 normalization
        const uint32x4x2_t mag0 = mag_l1_S32_S32(gx0, gy0);
        const uint32x4x2_t mag1 = mag_l1_S32_S32(gx1, gy1);

        // Store magnitude
        vst1q_u32(out1 + 0, mag0.val[0]);
        vst1q_u32(out1 + 4, mag0.val[1]);
        vst1q_u32(out1 + 8, mag1.val[0]);
        vst1q_u32(out1 + 12, mag1.val[1]);
    }
}
/** Gradient function used when the gradient size = 7 and when the norm_type = L2-norm
 *
 * @param[in]  in1_ptr  Pointer to source image. Gx image. Data type supported S32
 * @param[in]  in2_ptr  Pointer to source image. Gy image. Data type supported S32
 * @param[out] out1_ptr Pointer to destination image. Magnitude. Data type supported U32
 * @param[out] out2_ptr Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l2norm_S32_S32_U32_U8(const void *__restrict in1_ptr, const void *__restrict in2_ptr, void *__restrict out1_ptr, void *__restrict out2_ptr)
{
    auto in1  = static_cast<const int32_t *__restrict>(in1_ptr);
    auto in2  = static_cast<const int32_t *__restrict>(in2_ptr);
    auto out1 = static_cast<uint32_t *__restrict>(out1_ptr);
    auto out2 = static_cast<uint8_t *__restrict>(out2_ptr);

    // Process low and high part
    for(size_t i = 0; i < 2; ++i, in1 += 16, in2 += 16, out1 += 16, out2 += 16)
    {
        const int32x4x2_t gx0 =
        {
            {
                vld1q_s32(in1 + 0),
                vld1q_s32(in1 + 4)
            }
        };

        const int32x4x2_t gx1 =
        {
            {
                vld1q_s32(in1 + 8),
                vld1q_s32(in1 + 12)
            }
        };

        const int32x4x2_t gy0 =
        {
            {
                vld1q_s32(in2 + 0),
                vld1q_s32(in2 + 4)
            }
        };

        const int32x4x2_t gy1 =
        {
            {
                vld1q_s32(in2 + 8),
                vld1q_s32(in2 + 12)
            }
        };

        // Compute and store phase
        vst1_u8(out2 + 0, phase_quantization_S32_S32(gx0, gy0));
        vst1_u8(out2 + 8, phase_quantization_S32_S32(gx1, gy1));

        // Compute magnitude using L2 normalization
        const uint32x4x2_t mag0 = mag_l2_S32_S32(gx0, gy0);
        const uint32x4x2_t mag1 = mag_l2_S32_S32(gx1, gy1);

        // Store magnitude
        vst1q_u32(out1 + 0, mag0.val[0]);
        vst1q_u32(out1 + 4, mag0.val[1]);
        vst1q_u32(out1 + 8, mag1.val[0]);
        vst1q_u32(out1 + 12, mag1.val[1]);
    }
}
inline uint16x4_t non_max_U32_helper(const uint32_t *in, const uint16x4_t pc, const uint32_t stride_mag, const int32_t lower_thr, const int32_t upper_thr)
{
    // Phase for 4 pixels
    const uint32x4_t pc32 = vmovl_u16(pc);

    // Get magnitude for 4 pixels
    uint32x4_t mc = vld1q_u32(in);

    // Angle_quantized: 0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
    // 0 degree
    const uint32x4_t mk0_0 = vld1q_u32(in - 1);
    const uint32x4_t mk0_1 = vld1q_u32(in + 1);
    uint32x4_t mask0 = vceqq_u32(pc32, vdupq_n_u32(0));
    mask0 = vandq_u32(mask0, vcgeq_u32(mc, mk0_0));
    mask0 = vandq_u32(mask0, vcgeq_u32(mc, mk0_1));

    // 45 degree
    const uint32x4_t mk45_0 = vld1q_u32(in - stride_mag - 1);
    const uint32x4_t mk45_1 = vld1q_u32(in + stride_mag + 1);
    uint32x4_t mask1 = vceqq_u32(pc32, vdupq_n_u32(1));
    mask1 = vandq_u32(mask1, vcgeq_u32(mc, mk45_0));
    mask1 = vandq_u32(mask1, vcgeq_u32(mc, mk45_1));

    // 90 degree
    const uint32x4_t mk90_0 = vld1q_u32(in - stride_mag);
    const uint32x4_t mk90_1 = vld1q_u32(in + stride_mag);
    uint32x4_t mask2 = vceqq_u32(pc32, vdupq_n_u32(2));
    mask2 = vandq_u32(mask2, vcgeq_u32(mc, mk90_0));
    mask2 = vandq_u32(mask2, vcgeq_u32(mc, mk90_1));

    // 135 degree
    const uint32x4_t mk135_0 = vld1q_u32(in - stride_mag + 1);
    const uint32x4_t mk135_1 = vld1q_u32(in + stride_mag - 1);
    uint32x4_t mask3 = vceqq_u32(pc32, vdupq_n_u32(3));
    mask3 = vandq_u32(mask3, vcgeq_u32(mc, mk135_0));
    mask3 = vandq_u32(mask3, vcgeq_u32(mc, mk135_1));

    // Merge masks
    mask0 = vorrq_u32(mask0, mask1);
    mask2 = vorrq_u32(mask2, mask3);
    mask0 = vorrq_u32(mask0, mask2);

    mc = vbslq_u32(mask0, mc, vdupq_n_u32(0));
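
    // Hysteresis thresholding on the surviving maxima: values above upper_thr are
    // definite edges, values at or below lower_thr are rejected, anything in
    // between is tagged MAYBE for the edge-tracing stage to resolve.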
    // mc > upper_thr
    mask0 = vcgtq_u32(mc, vdupq_n_u32(upper_thr));

    // mc <= lower_thr
    mask1 = vcleq_u32(mc, vdupq_n_u32(lower_thr));

    // mc <= upper_thr && mc > lower_thr
    mask2 = vcleq_u32(mc, vdupq_n_u32(upper_thr));
    mask2 = vandq_u32(mask2, vcgtq_u32(mc, vdupq_n_u32(lower_thr)));

    mc = vbslq_u32(mask0, vdupq_n_u32(EDGE), mc);
    mc = vbslq_u32(mask1, vdupq_n_u32(NO_EDGE), mc);
    mc = vbslq_u32(mask2, vdupq_n_u32(MAYBE), mc);

    return vmovn_u32(mc);
}
/** Computes edge tracing recursively; called by edge_trace_U8_U8
 *
 * @param[in]  in         Pointer to source image. Data type supported U8
 * @param[out] out        Pointer to destination image. Data type supported U8
 * @param[in]  in_stride  Stride of the input image
 * @param[in]  out_stride Stride of the output image
 */
void edge_trace_recursive_U8_U8(uint8_t *__restrict in, uint8_t *__restrict out, const int32_t in_stride, const int32_t out_stride)
{
    // Look for MAYBE pixels in 8 directions
    *out = EDGE;

    // (-1, 0)
    uint8_t pixel = *(in - 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(in - 1) = EDGE;

        edge_trace_recursive_U8_U8(in - 1, out - 1, in_stride, out_stride);
    }

    // (+1, 0)
    pixel = *(in + 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(in + 1) = EDGE;

        edge_trace_recursive_U8_U8(in + 1, out + 1, in_stride, out_stride);
    }

    in -= in_stride;
    out -= out_stride;

    // (-1, -1)
    pixel = *(in - 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(in - 1) = EDGE;

        edge_trace_recursive_U8_U8(in - 1, out - 1, in_stride, out_stride);
    }

    // (0, -1)
    pixel = *in;

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *in = EDGE;

        edge_trace_recursive_U8_U8(in, out, in_stride, out_stride);
    }

    // (+1, -1)
    pixel = *(in + 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(in + 1) = EDGE;

        edge_trace_recursive_U8_U8(in + 1, out + 1, in_stride, out_stride);
    }

    in += in_stride * 2;
    out += out_stride * 2;

    // (-1, +1)
    pixel = *(in - 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(in - 1) = EDGE;

        edge_trace_recursive_U8_U8(in - 1, out - 1, in_stride, out_stride);
    }

    // (0, +1)
    pixel = *in;

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *in = EDGE;

        edge_trace_recursive_U8_U8(in, out, in_stride, out_stride);
    }

    // (+1, +1)
    pixel = *(in + 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(in + 1) = EDGE;

        edge_trace_recursive_U8_U8(in + 1, out + 1, in_stride, out_stride);
    }
}
} // namespace fp16
void NEGradientFP16Kernel::configure(const ITensor *gx, const ITensor *gy, ITensor *magnitude, ITensor *phase, int32_t norm_type)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gx, 1, DataType::S16, DataType::S32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gy, 1, DataType::S16, DataType::S32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(magnitude, 1, DataType::U16, DataType::U32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(phase, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(gx, gy);
    ARM_COMPUTE_ERROR_ON_MSG(element_size_from_data_type(gx->info()->data_type()) != element_size_from_data_type(magnitude->info()->data_type()), "Magnitude must have the same element size as Gx and Gy");

    _gx        = gx;
    _gy        = gy;
    _magnitude = magnitude;
    _phase     = phase;

    if(_gx->info()->data_type() == DataType::S16)
    {
        if(norm_type == 1) // L1NORM
        {
            _func = &fp16::mag_phase_l1norm_S16_S16_U16_U8;
        }
        else
        {
            _func = &fp16::mag_phase_l2norm_S16_S16_U16_U8;
        }
    }
    else
    {
        if(norm_type == 1) // L1NORM
        {
            _func = &fp16::mag_phase_l1norm_S32_S32_U32_U8;
        }
        else
        {
            _func = &fp16::mag_phase_l2norm_S32_S32_U32_U8;
        }
    }

    constexpr unsigned int num_elems_processed_per_iteration = 32;

    // Configure kernel window
    Window win = calculate_max_window(*_gx->info(), Steps(num_elems_processed_per_iteration));

    AccessWindowHorizontal gx_access(_gx->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal gy_access(_gy->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal mag_access(_magnitude->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal phase_access(_phase->info(), 0, num_elems_processed_per_iteration);

    update_window_and_padding(win, gx_access, gy_access, mag_access, phase_access);

    mag_access.set_valid_region(win, _gx->info()->valid_region());
    phase_access.set_valid_region(win, _gx->info()->valid_region());

    INEKernel::configure(win);
}
#endif /* ARM_COMPUTE_ENABLE_FP16 */
namespace
{
inline uint8x8_t phase_quantization(const float32x4x2_t &gx, const float32x4x2_t &gy)
{
    // Constants used for evaluating score1 and score3
    static const float32x4_t const45 = vdupq_n_f32(0.70710678118655f);
    static const float32x4_t zero    = vdupq_n_f32(0.0f);
    static const float32x4_t one     = vdupq_n_f32(1.0f);
    static const float32x4_t two     = vdupq_n_f32(2.0f);
    static const float32x4_t three   = vdupq_n_f32(3.0f);

    // Score0: (1, 0)
    const float32x4x2_t score0 =
    {
        {
            vabsq_f32(gx.val[0]),
            vabsq_f32(gx.val[1])
        }
    };

    // Score2: (0, 1)
    const float32x4x2_t score2 =
    {
        {
            vabsq_f32(gy.val[0]),
            vabsq_f32(gy.val[1])
        }
    };

    // Score1 and Score3: ( sqrt(2) / 2, sqrt(2) / 2 ) - ( -sqrt(2) / 2, sqrt(2) / 2 )
    float32x4x2_t score1 =
    {
        {
            vmulq_f32(gy.val[0], const45),
            vmulq_f32(gy.val[1], const45)
        }
    };

    float32x4x2_t score3 = score1;

    score1.val[0] = vmlaq_f32(score1.val[0], gx.val[0], const45);
    score1.val[1] = vmlaq_f32(score1.val[1], gx.val[1], const45);
    score3.val[0] = vmlsq_f32(score3.val[0], gx.val[0], const45);
    score3.val[1] = vmlsq_f32(score3.val[1], gx.val[1], const45);

    score1.val[0] = vabsq_f32(score1.val[0]);
    score1.val[1] = vabsq_f32(score1.val[1]);
    score3.val[0] = vabsq_f32(score3.val[0]);
    score3.val[1] = vabsq_f32(score3.val[1]);
    float32x4x2_t phase =
    {
        {
            zero,
            zero
        }
    };

    float32x4x2_t old_score = score0;

    // score1 > old_score?
    uint32x4x2_t mask =
    {
        {
            vcgtq_f32(score1.val[0], old_score.val[0]),
            vcgtq_f32(score1.val[1], old_score.val[1])
        }
    };

    phase.val[0]     = vbslq_f32(mask.val[0], one, phase.val[0]);
    phase.val[1]     = vbslq_f32(mask.val[1], one, phase.val[1]);
    old_score.val[0] = vbslq_f32(mask.val[0], score1.val[0], old_score.val[0]);
    old_score.val[1] = vbslq_f32(mask.val[1], score1.val[1], old_score.val[1]);

    // score2 > old_score?
    mask.val[0] = vcgtq_f32(score2.val[0], old_score.val[0]);
    mask.val[1] = vcgtq_f32(score2.val[1], old_score.val[1]);

    phase.val[0]     = vbslq_f32(mask.val[0], two, phase.val[0]);
    phase.val[1]     = vbslq_f32(mask.val[1], two, phase.val[1]);
    old_score.val[0] = vbslq_f32(mask.val[0], score2.val[0], old_score.val[0]);
    old_score.val[1] = vbslq_f32(mask.val[1], score2.val[1], old_score.val[1]);

    // score3 > old_score?
    mask.val[0] = vcgtq_f32(score3.val[0], old_score.val[0]);
    mask.val[1] = vcgtq_f32(score3.val[1], old_score.val[1]);

    phase.val[0]     = vbslq_f32(mask.val[0], three, phase.val[0]);
    phase.val[1]     = vbslq_f32(mask.val[1], three, phase.val[1]);
    old_score.val[0] = vbslq_f32(mask.val[0], score3.val[0], old_score.val[0]);
    old_score.val[1] = vbslq_f32(mask.val[1], score3.val[1], old_score.val[1]);

    // Convert from float32x4_t to uint8x8_t
    return vmovn_u16(vcombine_u16(vmovn_u32(vcvtq_u32_f32(phase.val[0])),
                                  vmovn_u32(vcvtq_u32_f32(phase.val[1]))));
}
/* Computes the gradient phase if gradient_size = 3 or 5. The output is quantized.
 * 0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return quantized phase for 8 pixels
 */
inline uint8x8_t phase_quantization_S16_S16(int16x8_t gx, int16x8_t gy)
{
    // Convert to float
    const float32x4x2_t gx_f32 =
    {
        {
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(gx))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(gx)))
        }
    };

    const float32x4x2_t gy_f32 =
    {
        {
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(gy))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(gy)))
        }
    };

    return phase_quantization(gx_f32, gy_f32);
}
/* Computes the gradient phase if gradient_size = 7. The output is quantized.
 * 0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return quantized phase for 8 pixels
 */
inline uint8x8_t phase_quantization_S32_S32(const int32x4x2_t &gx, const int32x4x2_t &gy)
{
    // Convert to float
    const float32x4x2_t gx_f32 =
    {
        {
            vcvtq_f32_s32(gx.val[0]),
            vcvtq_f32_s32(gx.val[1])
        }
    };

    const float32x4x2_t gy_f32 =
    {
        {
            vcvtq_f32_s32(gy.val[0]),
            vcvtq_f32_s32(gy.val[1])
        }
    };

    return phase_quantization(gx_f32, gy_f32);
}
/* Computes the magnitude using the L1-norm type if gradient_size = 3 or 5
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint16x8_t mag_l1_S16_S16(int16x8_t gx, int16x8_t gy)
{
    return vaddq_u16(vreinterpretq_u16_s16(vabsq_s16(gx)),
                     vreinterpretq_u16_s16(vabsq_s16(gy)));
}
/* Computes the magnitude using the L1-norm type if gradient_size = 7
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint32x4x2_t mag_l1_S32_S32(const int32x4x2_t &gx, const int32x4x2_t &gy)
{
    const uint32x4x2_t gx_abs =
    {
        {
            vreinterpretq_u32_s32(vabsq_s32(gx.val[0])),
            vreinterpretq_u32_s32(vabsq_s32(gx.val[1]))
        }
    };

    const uint32x4x2_t gy_abs =
    {
        {
            vreinterpretq_u32_s32(vabsq_s32(gy.val[0])),
            vreinterpretq_u32_s32(vabsq_s32(gy.val[1]))
        }
    };

    const uint32x4x2_t output =
    {
        {
            vaddq_u32(gx_abs.val[0], gy_abs.val[0]),
            vaddq_u32(gx_abs.val[1], gy_abs.val[1])
        }
    };

    return output;
}
inline float32x4x2_t mag_l2(const float32x4x2_t &gx, const float32x4x2_t &gy)
{
    // x^2
    float32x4x2_t magnitude =
    {
        {
            vmulq_f32(gx.val[0], gx.val[0]),
            vmulq_f32(gx.val[1], gx.val[1])
        }
    };

    // x^2 + y^2
    magnitude.val[0] = vmlaq_f32(magnitude.val[0], gy.val[0], gy.val[0]);
    magnitude.val[1] = vmlaq_f32(magnitude.val[1], gy.val[1], gy.val[1]);

    // sqrt(x^2 + y^2)
    magnitude.val[0] = vmulq_f32(vrsqrteq_f32(magnitude.val[0]), magnitude.val[0]);
    magnitude.val[1] = vmulq_f32(vrsqrteq_f32(magnitude.val[1]), magnitude.val[1]);

    return magnitude;
}
/* Computes the magnitude using L2-norm if gradient_size = 3 or 5
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint16x8_t mag_l2_S16_S16(int16x8_t gx, int16x8_t gy)
{
    // Compute magnitude using L2 normalization
    const float32x4x2_t gx2 =
    {
        {
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(gx))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(gx)))
        }
    };

    const float32x4x2_t gy2 =
    {
        {
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(gy))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(gy)))
        }
    };

    const float32x4x2_t magnitude = mag_l2(gx2, gy2);

    // Store magnitude - Convert to uint16x8
    return vcombine_u16(vmovn_u32(vcvtq_u32_f32(magnitude.val[0])),
                        vmovn_u32(vcvtq_u32_f32(magnitude.val[1])));
}
/* Computes the magnitude using L2-norm if gradient_size = 7
 *
 * @param[in] gx Gx component
 * @param[in] gy Gy component
 *
 * @return magnitude for 8 pixels
 */
inline uint32x4x2_t mag_l2_S32_S32(const int32x4x2_t &gx, const int32x4x2_t &gy)
{
    // Compute magnitude using L2 normalization
    const float32x4x2_t gx2 =
    {
        {
            vcvtq_f32_s32(gx.val[0]),
            vcvtq_f32_s32(gx.val[1])
        }
    };

    const float32x4x2_t gy2 =
    {
        {
            vcvtq_f32_s32(gy.val[0]),
            vcvtq_f32_s32(gy.val[1])
        }
    };

    const float32x4x2_t magnitude = mag_l2(gx2, gy2);
    const uint32x4x2_t  mag32 =
    {
        {
            vcvtq_u32_f32(magnitude.val[0]),
            vcvtq_u32_f32(magnitude.val[1])
        }
    };

    return mag32;
}
/* Gradient function used when the gradient size = 3 or 5 and when the norm_type = L1-norm
 *
 * @param[in]  gx_ptr        Pointer to source image. Gx image. Data type supported S16
 * @param[in]  gy_ptr        Pointer to source image. Gy image. Data type supported S16
 * @param[out] magnitude_ptr Pointer to destination image. Magnitude. Data type supported U16
 * @param[out] phase_ptr     Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l1norm_S16_S16_U16_U8(const void *__restrict gx_ptr, const void *__restrict gy_ptr, void *__restrict magnitude_ptr, void *__restrict phase_ptr)
{
    const auto gx        = static_cast<const int16_t *__restrict>(gx_ptr);
    const auto gy        = static_cast<const int16_t *__restrict>(gy_ptr);
    const auto magnitude = static_cast<uint16_t *__restrict>(magnitude_ptr);
    const auto phase     = static_cast<uint8_t *__restrict>(phase_ptr);

    const int16x8x4_t gx_val =
    {
        {
            vld1q_s16(gx),
            vld1q_s16(gx + 8),
            vld1q_s16(gx + 16),
            vld1q_s16(gx + 24)
        }
    };

    const int16x8x4_t gy_val =
    {
        {
            vld1q_s16(gy),
            vld1q_s16(gy + 8),
            vld1q_s16(gy + 16),
            vld1q_s16(gy + 24)
        }
    };

    // Compute and store phase
    vst1_u8(phase + 0, phase_quantization_S16_S16(gx_val.val[0], gy_val.val[0]));
    vst1_u8(phase + 8, phase_quantization_S16_S16(gx_val.val[1], gy_val.val[1]));
    vst1_u8(phase + 16, phase_quantization_S16_S16(gx_val.val[2], gy_val.val[2]));
    vst1_u8(phase + 24, phase_quantization_S16_S16(gx_val.val[3], gy_val.val[3]));

    // Compute and store magnitude using L1 normalization
    vst1q_u16(magnitude + 0, mag_l1_S16_S16(gx_val.val[0], gy_val.val[0]));
    vst1q_u16(magnitude + 8, mag_l1_S16_S16(gx_val.val[1], gy_val.val[1]));
    vst1q_u16(magnitude + 16, mag_l1_S16_S16(gx_val.val[2], gy_val.val[2]));
    vst1q_u16(magnitude + 24, mag_l1_S16_S16(gx_val.val[3], gy_val.val[3]));
}
/* Gradient function used when the gradient size = 3 or 5 and when the norm_type = L2-norm
 *
 * @param[in]  gx_ptr        Pointer to source image. Gx image. Data type supported S16
 * @param[in]  gy_ptr        Pointer to source image. Gy image. Data type supported S16
 * @param[out] magnitude_ptr Pointer to destination image. Magnitude. Data type supported U16
 * @param[out] phase_ptr     Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l2norm_S16_S16_U16_U8(const void *__restrict gx_ptr, const void *__restrict gy_ptr, void *__restrict magnitude_ptr, void *__restrict phase_ptr)
{
    const auto gx        = static_cast<const int16_t *__restrict>(gx_ptr);
    const auto gy        = static_cast<const int16_t *__restrict>(gy_ptr);
    const auto magnitude = static_cast<uint16_t *__restrict>(magnitude_ptr);
    const auto phase     = static_cast<uint8_t *__restrict>(phase_ptr);

    const int16x8x4_t gx_val =
    {
        {
            vld1q_s16(gx),
            vld1q_s16(gx + 8),
            vld1q_s16(gx + 16),
            vld1q_s16(gx + 24)
        }
    };

    const int16x8x4_t gy_val =
    {
        {
            vld1q_s16(gy),
            vld1q_s16(gy + 8),
            vld1q_s16(gy + 16),
            vld1q_s16(gy + 24)
        }
    };

    // Compute and store phase
    vst1_u8(phase + 0, phase_quantization_S16_S16(gx_val.val[0], gy_val.val[0]));
    vst1_u8(phase + 8, phase_quantization_S16_S16(gx_val.val[1], gy_val.val[1]));
    vst1_u8(phase + 16, phase_quantization_S16_S16(gx_val.val[2], gy_val.val[2]));
    vst1_u8(phase + 24, phase_quantization_S16_S16(gx_val.val[3], gy_val.val[3]));

    // Compute and store magnitude using L2 normalization
    vst1q_u16(magnitude + 0, mag_l2_S16_S16(gx_val.val[0], gy_val.val[0]));
    vst1q_u16(magnitude + 8, mag_l2_S16_S16(gx_val.val[1], gy_val.val[1]));
    vst1q_u16(magnitude + 16, mag_l2_S16_S16(gx_val.val[2], gy_val.val[2]));
    vst1q_u16(magnitude + 24, mag_l2_S16_S16(gx_val.val[3], gy_val.val[3]));
}
/* Gradient function used when the gradient size = 7 and when the norm_type = L1-norm
 *
 * @param[in]  gx_ptr        Pointer to source image. Gx image. Data type supported S32
 * @param[in]  gy_ptr        Pointer to source image. Gy image. Data type supported S32
 * @param[out] magnitude_ptr Pointer to destination image. Magnitude. Data type supported U32
 * @param[out] phase_ptr     Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l1norm_S32_S32_U32_U8(const void *__restrict gx_ptr, const void *__restrict gy_ptr, void *__restrict magnitude_ptr, void *__restrict phase_ptr)
{
    auto gx        = static_cast<const int32_t *__restrict>(gx_ptr);
    auto gy        = static_cast<const int32_t *__restrict>(gy_ptr);
    auto magnitude = static_cast<uint32_t *__restrict>(magnitude_ptr);
    auto phase     = static_cast<uint8_t *__restrict>(phase_ptr);

    // Process low and high part
    for(size_t i = 0; i < 2; ++i, gx += 16, gy += 16, magnitude += 16, phase += 16)
    {
        const int32x4x2_t gx0 =
        {
            {
                vld1q_s32(gx + 0),
                vld1q_s32(gx + 4)
            }
        };

        const int32x4x2_t gx1 =
        {
            {
                vld1q_s32(gx + 8),
                vld1q_s32(gx + 12)
            }
        };

        const int32x4x2_t gy0 =
        {
            {
                vld1q_s32(gy + 0),
                vld1q_s32(gy + 4)
            }
        };

        const int32x4x2_t gy1 =
        {
            {
                vld1q_s32(gy + 8),
                vld1q_s32(gy + 12)
            }
        };

        // Compute and store phase
        vst1_u8(phase + 0, phase_quantization_S32_S32(gx0, gy0));
        vst1_u8(phase + 8, phase_quantization_S32_S32(gx1, gy1));

        // Compute magnitude using L1 normalization
        const uint32x4x2_t mag0 = mag_l1_S32_S32(gx0, gy0);
        const uint32x4x2_t mag1 = mag_l1_S32_S32(gx1, gy1);

        // Store magnitude
        vst1q_u32(magnitude + 0, mag0.val[0]);
        vst1q_u32(magnitude + 4, mag0.val[1]);
        vst1q_u32(magnitude + 8, mag1.val[0]);
        vst1q_u32(magnitude + 12, mag1.val[1]);
    }
}
/* Gradient function used when the gradient size = 7 and when the norm_type = L2-norm
 *
 * @param[in]  gx_ptr        Pointer to source image. Gx image. Data type supported S32
 * @param[in]  gy_ptr        Pointer to source image. Gy image. Data type supported S32
 * @param[out] magnitude_ptr Pointer to destination image. Magnitude. Data type supported U32
 * @param[out] phase_ptr     Pointer to destination image. Quantized phase. Data type supported U8
 */
void mag_phase_l2norm_S32_S32_U32_U8(const void *__restrict gx_ptr, const void *__restrict gy_ptr, void *__restrict magnitude_ptr, void *__restrict phase_ptr)
{
    auto gx        = static_cast<const int32_t *__restrict>(gx_ptr);
    auto gy        = static_cast<const int32_t *__restrict>(gy_ptr);
    auto magnitude = static_cast<uint32_t *__restrict>(magnitude_ptr);
    auto phase     = static_cast<uint8_t *__restrict>(phase_ptr);

    // Process low and high part
    for(size_t i = 0; i < 2; ++i, gx += 16, gy += 16, magnitude += 16, phase += 16)
    {
        const int32x4x2_t gx0 =
        {
            {
                vld1q_s32(gx + 0),
                vld1q_s32(gx + 4)
            }
        };

        const int32x4x2_t gx1 =
        {
            {
                vld1q_s32(gx + 8),
                vld1q_s32(gx + 12)
            }
        };

        const int32x4x2_t gy0 =
        {
            {
                vld1q_s32(gy + 0),
                vld1q_s32(gy + 4)
            }
        };

        const int32x4x2_t gy1 =
        {
            {
                vld1q_s32(gy + 8),
                vld1q_s32(gy + 12)
            }
        };

        // Compute and store phase
        vst1_u8(phase + 0, phase_quantization_S32_S32(gx0, gy0));
        vst1_u8(phase + 8, phase_quantization_S32_S32(gx1, gy1));

        // Compute magnitude using L2 normalization
        const uint32x4x2_t mag0 = mag_l2_S32_S32(gx0, gy0);
        const uint32x4x2_t mag1 = mag_l2_S32_S32(gx1, gy1);

        // Store magnitude
        vst1q_u32(magnitude + 0, mag0.val[0]);
        vst1q_u32(magnitude + 4, mag0.val[1]);
        vst1q_u32(magnitude + 8, mag1.val[0]);
        vst1q_u32(magnitude + 12, mag1.val[1]);
    }
}
/* Computes non-maxima suppression and hysteresis when the gradient size = 3 or 5
 *
 * @param[in]  magnitude_ptr Pointer to source image. Magnitude. Data type supported U16
 * @param[in]  phase_ptr     Pointer to source image. Quantized phase. Data type supported U8
 * @param[out] output_ptr    Pointer to output image. Data type supported U8
 * @param[in]  stride_mag    Stride of magnitude image
 * @param[in]  lower_thr     Lower threshold used for the hysteresis
 * @param[in]  upper_thr     Upper threshold used for the hysteresis
 */
void non_max_suppression_U16_U8_U8(const void *__restrict magnitude_ptr, const void *__restrict phase_ptr, void *__restrict output_ptr, const uint32_t stride_mag, const int32_t lower_thr,
                                   const int32_t upper_thr)
{
    const auto magnitude = static_cast<const uint16_t *__restrict>(magnitude_ptr);
    const auto phase     = static_cast<const uint8_t *__restrict>(phase_ptr);
    const auto output    = static_cast<uint8_t *__restrict>(output_ptr);

    // Get magnitude and phase of the centre pixels
    uint16x8_t mc = vld1q_u16(magnitude);

    // Angle_quantized: 0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
    const uint16x8_t pc16 = vmovl_u8(vld1_u8(phase));

    // 0 degree
    const uint16x8_t mk0_0 = vld1q_u16(magnitude - 1);
    const uint16x8_t mk0_1 = vld1q_u16(magnitude + 1);
    uint16x8_t mask0 = vceqq_u16(pc16, vdupq_n_u16(0));
    mask0 = vandq_u16(mask0, vcgeq_u16(mc, mk0_0));
    mask0 = vandq_u16(mask0, vcgeq_u16(mc, mk0_1));

    // 45 degree
    const uint16x8_t mk45_0 = vld1q_u16(magnitude - stride_mag - 1);
    const uint16x8_t mk45_1 = vld1q_u16(magnitude + stride_mag + 1);
    uint16x8_t mask1 = vceqq_u16(pc16, vdupq_n_u16(1));
    mask1 = vandq_u16(mask1, vcgeq_u16(mc, mk45_0));
    mask1 = vandq_u16(mask1, vcgeq_u16(mc, mk45_1));

    // 90 degree
    const uint16x8_t mk90_0 = vld1q_u16(magnitude - stride_mag);
    const uint16x8_t mk90_1 = vld1q_u16(magnitude + stride_mag);
    uint16x8_t mask2 = vceqq_u16(pc16, vdupq_n_u16(2));
    mask2 = vandq_u16(mask2, vcgeq_u16(mc, mk90_0));
    mask2 = vandq_u16(mask2, vcgeq_u16(mc, mk90_1));

    // 135 degree
    const uint16x8_t mk135_0 = vld1q_u16(magnitude - stride_mag + 1);
    const uint16x8_t mk135_1 = vld1q_u16(magnitude + stride_mag - 1);
    uint16x8_t mask3 = vceqq_u16(pc16, vdupq_n_u16(3));
    mask3 = vandq_u16(mask3, vcgeq_u16(mc, mk135_0));
    mask3 = vandq_u16(mask3, vcgeq_u16(mc, mk135_1));

    // Merge masks
    mask0 = vorrq_u16(mask0, mask1);
    mask2 = vorrq_u16(mask2, mask3);
    mask0 = vorrq_u16(mask0, mask2);

    mc = vbslq_u16(mask0, mc, vdupq_n_u16(0));
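
    // Hysteresis thresholding on the surviving maxima: EDGE above upper_thr,
    // NO_EDGE at or below lower_thr, MAYBE in between for the tracing stage.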
    // mc > upper_thr
    mask0 = vcgtq_u16(mc, vdupq_n_u16(upper_thr));

    // mc <= lower_thr
    mask1 = vcleq_u16(mc, vdupq_n_u16(lower_thr));

    // mc <= upper_thr && mc > lower_thr
    mask2 = vcleq_u16(mc, vdupq_n_u16(upper_thr));
    mask2 = vandq_u16(mask2, vcgtq_u16(mc, vdupq_n_u16(lower_thr)));

    mc = vbslq_u16(mask0, vdupq_n_u16(EDGE), mc);
    mc = vbslq_u16(mask1, vdupq_n_u16(NO_EDGE), mc);
    mc = vbslq_u16(mask2, vdupq_n_u16(MAYBE), mc);

    vst1_u8(output, vmovn_u16(mc));
}
inline uint16x4_t non_max_U32_helper(const uint32_t *input, const uint16x4_t pc, const uint32_t stride_mag, const int32_t lower_thr, const int32_t upper_thr)
{
    // Phase for 4 pixels
    const uint32x4_t pc32 = vmovl_u16(pc);

    // Get magnitude for 4 pixels
    uint32x4_t mc = vld1q_u32(input);

    // Angle_quantized: 0 = 0°, 1 = 45°, 2 = 90°, 3 = 135°
    // 0 degree
    const uint32x4_t mk0_0 = vld1q_u32(input - 1);
    const uint32x4_t mk0_1 = vld1q_u32(input + 1);
    uint32x4_t mask0 = vceqq_u32(pc32, vdupq_n_u32(0));
    mask0 = vandq_u32(mask0, vcgeq_u32(mc, mk0_0));
    mask0 = vandq_u32(mask0, vcgeq_u32(mc, mk0_1));

    // 45 degree
    const uint32x4_t mk45_0 = vld1q_u32(input - stride_mag - 1);
    const uint32x4_t mk45_1 = vld1q_u32(input + stride_mag + 1);
    uint32x4_t mask1 = vceqq_u32(pc32, vdupq_n_u32(1));
    mask1 = vandq_u32(mask1, vcgeq_u32(mc, mk45_0));
    mask1 = vandq_u32(mask1, vcgeq_u32(mc, mk45_1));

    // 90 degree
    const uint32x4_t mk90_0 = vld1q_u32(input - stride_mag);
    const uint32x4_t mk90_1 = vld1q_u32(input + stride_mag);
    uint32x4_t mask2 = vceqq_u32(pc32, vdupq_n_u32(2));
    mask2 = vandq_u32(mask2, vcgeq_u32(mc, mk90_0));
    mask2 = vandq_u32(mask2, vcgeq_u32(mc, mk90_1));

    // 135 degree
    const uint32x4_t mk135_0 = vld1q_u32(input - stride_mag + 1);
    const uint32x4_t mk135_1 = vld1q_u32(input + stride_mag - 1);
    uint32x4_t mask3 = vceqq_u32(pc32, vdupq_n_u32(3));
    mask3 = vandq_u32(mask3, vcgeq_u32(mc, mk135_0));
    mask3 = vandq_u32(mask3, vcgeq_u32(mc, mk135_1));

    // Merge masks
    mask0 = vorrq_u32(mask0, mask1);
    mask2 = vorrq_u32(mask2, mask3);
    mask0 = vorrq_u32(mask0, mask2);

    mc = vbslq_u32(mask0, mc, vdupq_n_u32(0));

    // mc > upper_thr
    mask0 = vcgtq_u32(mc, vdupq_n_u32(upper_thr));

    // mc <= lower_thr
    mask1 = vcleq_u32(mc, vdupq_n_u32(lower_thr));

    // mc <= upper_thr && mc > lower_thr
    mask2 = vcleq_u32(mc, vdupq_n_u32(upper_thr));
    mask2 = vandq_u32(mask2, vcgtq_u32(mc, vdupq_n_u32(lower_thr)));

    mc = vbslq_u32(mask0, vdupq_n_u32(EDGE), mc);
    mc = vbslq_u32(mask1, vdupq_n_u32(NO_EDGE), mc);
    mc = vbslq_u32(mask2, vdupq_n_u32(MAYBE), mc);

    return vmovn_u32(mc);
}
/* Computes non-maxima suppression and hysteresis when the gradient_size = 7
 *
 * @param[in]  magnitude_ptr Pointer to source image. Magnitude. Data type supported U32
 * @param[in]  phase_ptr     Pointer to source image. Quantized phase. Data type supported U8
 * @param[out] output_ptr    Pointer to destination image. Data type supported U8
 * @param[in]  stride_mag    Stride of magnitude image
 * @param[in]  lower_thr     Lower threshold used for the hysteresis
 * @param[in]  upper_thr     Upper threshold used for the hysteresis
 */
void non_max_suppression_U32_U8_U8(const void *__restrict magnitude_ptr, const void *__restrict phase_ptr, void *__restrict output_ptr, const uint32_t stride_mag, const int32_t lower_thr,
                                   const int32_t upper_thr)
{
    const auto magnitude = static_cast<const uint32_t *__restrict>(magnitude_ptr);
    const auto phase     = static_cast<const uint8_t *__restrict>(phase_ptr);
    const auto output    = static_cast<uint8_t *__restrict>(output_ptr);

    // Get phase for 8 pixels
    const uint16x8_t pc16 = vmovl_u8(vld1_u8(phase));

    // Compute non maxima suppression
    const uint16x4x2_t res =
    {
        {
            non_max_U32_helper(magnitude, vget_low_u16(pc16), stride_mag, lower_thr, upper_thr),
            non_max_U32_helper(magnitude + 4, vget_high_u16(pc16), stride_mag, lower_thr, upper_thr)
        }
    };

    // Store result
    vst1_u8(output, vmovn_u16(vcombine_u16(res.val[0], res.val[1])));
}
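
// The recursive tracer below performs a depth-first flood fill over the
// 8-connected neighbourhood: starting from a confirmed EDGE pixel, every
// adjacent MAYBE pixel is promoted to EDGE and the search continues from it,
// so weak responses survive only when connected to a strong edge.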
/* Computes edge tracing recursively; called by edge_trace_U8_U8
 *
 * @param[in]  input         Pointer to source image. Data type supported U8
 * @param[out] output        Pointer to destination image. Data type supported U8
 * @param[in]  input_stride  Stride of the input image
 * @param[in]  output_stride Stride of the output image
 */
void edge_trace_recursive_U8_U8(uint8_t *__restrict input, uint8_t *__restrict output, const int32_t input_stride, const int32_t output_stride)
{
    // Look for MAYBE pixels in 8 directions
    *output = EDGE;

    // (-1, 0)
    uint8_t pixel = *(input - 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(input - 1) = EDGE;

        edge_trace_recursive_U8_U8(input - 1, output - 1, input_stride, output_stride);
    }

    // (+1, 0)
    pixel = *(input + 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(input + 1) = EDGE;

        edge_trace_recursive_U8_U8(input + 1, output + 1, input_stride, output_stride);
    }

    input -= input_stride;
    output -= output_stride;

    // (-1, -1)
    pixel = *(input - 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(input - 1) = EDGE;

        edge_trace_recursive_U8_U8(input - 1, output - 1, input_stride, output_stride);
    }

    // (0, -1)
    pixel = *input;

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *input = EDGE;

        edge_trace_recursive_U8_U8(input, output, input_stride, output_stride);
    }

    // (+1, -1)
    pixel = *(input + 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(input + 1) = EDGE;

        edge_trace_recursive_U8_U8(input + 1, output + 1, input_stride, output_stride);
    }

    input += input_stride * 2;
    output += output_stride * 2;

    // (-1, +1)
    pixel = *(input - 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(input - 1) = EDGE;

        edge_trace_recursive_U8_U8(input - 1, output - 1, input_stride, output_stride);
    }

    // (0, +1)
    pixel = *input;

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *input = EDGE;

        edge_trace_recursive_U8_U8(input, output, input_stride, output_stride);
    }

    // (+1, +1)
    pixel = *(input + 1);

    if(pixel == MAYBE)
    {
        // Touched a MAYBE point. MAYBE becomes EDGE
        *(input + 1) = EDGE;

        edge_trace_recursive_U8_U8(input + 1, output + 1, input_stride, output_stride);
    }
}
/* Computes edge tracing
 *
 * @param[in]  input         Pointer to source image. Data type supported U8
 * @param[out] output        Pointer to destination image. Data type supported U8
 * @param[in]  input_stride  Stride of the input image
 * @param[in]  output_stride Stride of the output image
 */
void edge_trace_U8_U8(uint8_t *__restrict input, uint8_t *__restrict output, const int32_t input_stride, const int32_t output_stride)
{
    if(*input == NO_EDGE)
    {
        *output = NO_EDGE;
    }
    // Check if EDGE and not yet touched
    else if((*input == EDGE) && (*output == NO_EDGE))
    {
        edge_trace_recursive_U8_U8(input, output, input_stride, output_stride);
    }
}
} // namespace
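
// Kernel wiring below. Typically these kernels are chained by the NECannyEdge
// function: a Sobel kernel produces Gx/Gy, NEGradientKernel turns them into
// magnitude and quantized phase, NEEdgeNonMaxSuppressionKernel thins and
// thresholds, and NEEdgeTraceKernel resolves the remaining MAYBE pixels.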
NEGradientKernel::NEGradientKernel()
    : _func(nullptr), _gx(nullptr), _gy(nullptr), _magnitude(nullptr), _phase(nullptr)
{
}
void NEGradientKernel::configure(const ITensor *gx, const ITensor *gy, ITensor *magnitude, ITensor *phase, int32_t norm_type)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gx, 1, DataType::S16, DataType::S32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(gy, 1, DataType::S16, DataType::S32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(magnitude, 1, DataType::U16, DataType::U32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(phase, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_MSG(element_size_from_data_type(gx->info()->data_type()) != element_size_from_data_type(gy->info()->data_type()), "Gx and Gy must have the same element size");
    ARM_COMPUTE_ERROR_ON_MSG(element_size_from_data_type(gx->info()->data_type()) != element_size_from_data_type(magnitude->info()->data_type()), "Magnitude must have the same element size as Gx and Gy");

    _gx        = gx;
    _gy        = gy;
    _magnitude = magnitude;
    _phase     = phase;

    if(_gx->info()->data_type() == DataType::S16)
    {
        if(norm_type == 1) // L1NORM
        {
            _func = &mag_phase_l1norm_S16_S16_U16_U8;
        }
        else
        {
            _func = &mag_phase_l2norm_S16_S16_U16_U8;
        }
    }
    else
    {
        if(norm_type == 1) // L1NORM
        {
            _func = &mag_phase_l1norm_S32_S32_U32_U8;
        }
        else
        {
            _func = &mag_phase_l2norm_S32_S32_U32_U8;
        }
    }

    constexpr unsigned int num_elems_processed_per_iteration = 32;

    // Configure kernel window
    Window win = calculate_max_window(*_gx->info(), Steps(num_elems_processed_per_iteration));

    AccessWindowHorizontal gx_access(_gx->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal gy_access(_gy->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal mag_access(_magnitude->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal phase_access(_phase->info(), 0, num_elems_processed_per_iteration);

    update_window_and_padding(win, gx_access, gy_access, mag_access, phase_access);

    mag_access.set_valid_region(win, _gx->info()->valid_region());
    phase_access.set_valid_region(win, _gx->info()->valid_region());

    INEKernel::configure(win);
}
void NEGradientKernel::run(const Window &window)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);
    Iterator gx(_gx, window);
    Iterator gy(_gy, window);
    Iterator magnitude(_magnitude, window);
    Iterator phase(_phase, window);

    execute_window_loop(window, [&](const Coordinates & id)
    {
        (*_func)(gx.ptr(), gy.ptr(), magnitude.ptr(), phase.ptr());
    },
    gx, gy, magnitude, phase);
}
NEEdgeNonMaxSuppressionKernel::NEEdgeNonMaxSuppressionKernel()
    : _func(nullptr), _magnitude(nullptr), _phase(nullptr), _output(nullptr), _lower_thr(0), _upper_thr(0)
{
}
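
// Non-maxima suppression reads a 3x3 neighbourhood around each pixel, so the
// kernel requires a one-pixel border on every side.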
BorderSize NEEdgeNonMaxSuppressionKernel::border_size() const
{
    return BorderSize(1);
}
void NEEdgeNonMaxSuppressionKernel::configure(const ITensor *magnitude, const ITensor *phase, ITensor *output,
                                              int32_t upper_thr, int32_t lower_thr, bool border_undefined)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(magnitude, 1, DataType::U16, DataType::U32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(phase, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);

    _magnitude = magnitude;
    _phase     = phase;
    _output    = output;

    switch(_magnitude->info()->data_type())
    {
        case DataType::U16:
            _func = &non_max_suppression_U16_U8_U8;
            break;
        case DataType::U32:
            _func = &non_max_suppression_U32_U8_U8;
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported data type!");
    }

    _lower_thr = lower_thr;
    _upper_thr = upper_thr;

    constexpr unsigned int num_elems_processed_per_iteration = 8;
    constexpr unsigned int num_elems_read_per_iteration      = 10;
    constexpr unsigned int num_rows_read_per_iteration       = 3;

    // Configure kernel window
    Window win = calculate_max_window(*_magnitude->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());

    AccessWindowRectangle  mag_access(_magnitude->info(), -border_size().left, -border_size().top, num_elems_read_per_iteration, num_rows_read_per_iteration);
    AccessWindowHorizontal phase_access(_phase->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(_output->info(), 0, num_elems_processed_per_iteration);

    update_window_and_padding(win, mag_access, phase_access, output_access);

    output_access.set_valid_region(win, _magnitude->info()->valid_region(), border_undefined, border_size());

    INEKernel::configure(win);
}
void NEEdgeNonMaxSuppressionKernel::run(const Window &window)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);
    Iterator magnitude(_magnitude, window);
    Iterator phase(_phase, window);
    Iterator output(_output, window);

    const size_t input1_stride        = _magnitude->info()->strides_in_bytes()[1];
    const size_t input1_stride_ushort = input1_stride / data_size_from_type(_magnitude->info()->data_type());

    execute_window_loop(window, [&](const Coordinates & id)
    {
        (*_func)(magnitude.ptr(), phase.ptr(), output.ptr(), input1_stride_ushort, _lower_thr, _upper_thr);
    },
    magnitude, phase, output);
}
NEEdgeTraceKernel::NEEdgeTraceKernel()
    : _input(nullptr), _output(nullptr)
{
}
BorderSize NEEdgeTraceKernel::border_size() const
{
    return BorderSize(1);
}
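
// Edge tracing recursively follows connected pixels across the whole frame, so
// the work cannot be safely split into independent sub-windows.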
bool NEEdgeTraceKernel::is_parallelisable() const
{
    return false;
}
void NEEdgeTraceKernel::configure(ITensor *input, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);

    _input  = input;
    _output = output;

    constexpr unsigned int num_elems_processed_per_iteration = 1;

    // Configure kernel window
    Window win = calculate_max_window(*_input->info(), Steps(num_elems_processed_per_iteration));

    const ValidRegion &input_valid_region  = input->info()->valid_region();
    const ValidRegion &output_valid_region = output->info()->valid_region();

    // Reads can occur within the valid region of the input + border
    AccessWindowStatic input_access(input->info(),
                                    input_valid_region.anchor[0] - border_size().left,
                                    input_valid_region.anchor[1] - border_size().top,
                                    input_valid_region.anchor[0] + input_valid_region.shape[0] + border_size().right,
                                    input_valid_region.anchor[1] + input_valid_region.shape[1] + border_size().bottom);

    // Writes can occur within the valid region of the output + border
    AccessWindowStatic output_access(output->info(),
                                     output_valid_region.anchor[0] - border_size().left,
                                     output_valid_region.anchor[1] - border_size().top,
                                     output_valid_region.anchor[0] + output_valid_region.shape[0] + border_size().right,
                                     output_valid_region.anchor[1] + output_valid_region.shape[1] + border_size().bottom);

    update_window_and_padding(win, input_access, output_access);

    output_access.set_valid_region(win, _input->info()->valid_region());

    INEKernel::configure(win);
}
void NEEdgeTraceKernel::run(const Window &window)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    Iterator input(_input, window);
    Iterator output(_output, window);

    const size_t input_stride  = _input->info()->strides_in_bytes()[1];
    const size_t output_stride = _output->info()->strides_in_bytes()[1];

    execute_window_loop(window, [&](const Coordinates & id)
    {
        edge_trace_U8_U8(input.ptr(), output.ptr(), input_stride, output_stride);
    },
    input, output);
}