2 * Copyright 2011 The LibYuv Project Authors. All rights reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include "libyuv/compare.h"
19 #include "libyuv/basic_types.h"
20 #include "libyuv/cpu_id.h"
21 #include "libyuv/row.h"
28 // hash seed of 5381 recommended.
29 // Internal C version of HashDjb2 with int sized count for efficiency.
30 uint32 HashDjb2_C(const uint8* src, int count, uint32 seed);
32 // This module is for Visual C x86
33 #if !defined(LIBYUV_DISABLE_X86) && \
34 (defined(_M_IX86) || \
35 (defined(__x86_64__) || (defined(__i386__) && !defined(__pic__))))
36 #define HAS_HASHDJB2_SSE41
37 uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed);
40 #define HAS_HASHDJB2_AVX2
41 uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed);
44 #endif // HAS_HASHDJB2_SSE41
46 // hash seed of 5381 recommended.
48 uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed) {
49 const int kBlockSize = 1 << 15; // 32768;
51 uint32 (*HashDjb2_SSE)(const uint8* src, int count, uint32 seed) = HashDjb2_C;
52 #if defined(HAS_HASHDJB2_SSE41)
53 if (TestCpuFlag(kCpuHasSSE41)) {
54 HashDjb2_SSE = HashDjb2_SSE41;
57 #if defined(HAS_HASHDJB2_AVX2)
58 if (TestCpuFlag(kCpuHasAVX2)) {
59 HashDjb2_SSE = HashDjb2_AVX2;
63 while (count >= (uint64)(kBlockSize)) {
64 seed = HashDjb2_SSE(src, kBlockSize, seed);
68 remainder = (int)(count) & ~15;
70 seed = HashDjb2_SSE(src, remainder, seed);
74 remainder = (int)(count) & 15;
76 seed = HashDjb2_C(src, remainder, seed);
81 uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b, int count);
82 #if !defined(LIBYUV_DISABLE_NEON) && \
83 (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
84 #define HAS_SUMSQUAREERROR_NEON
85 uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count);
87 #if !defined(LIBYUV_DISABLE_X86) && \
88 (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
89 #define HAS_SUMSQUAREERROR_SSE2
90 uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count);
92 // Visual C 2012 required for AVX2.
93 #if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && _MSC_VER >= 1700
94 #define HAS_SUMSQUAREERROR_AVX2
95 uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count);
98 // TODO(fbarchard): Refactor into row function.
100 uint64 ComputeSumSquareError(const uint8* src_a, const uint8* src_b,
102 // SumSquareError returns values 0 to 65535 for each squared difference.
103 // Up to 65536 of those can be summed and remain within a uint32.
104 // After each block of 65536 pixels, accumulate into a uint64.
105 const int kBlockSize = 65536;
106 int remainder = count & (kBlockSize - 1) & ~31;
109 uint32 (*SumSquareError)(const uint8* src_a, const uint8* src_b, int count) =
111 #if defined(HAS_SUMSQUAREERROR_NEON)
112 if (TestCpuFlag(kCpuHasNEON)) {
113 SumSquareError = SumSquareError_NEON;
116 #if defined(HAS_SUMSQUAREERROR_SSE2)
117 if (TestCpuFlag(kCpuHasSSE2)) {
118 // Note only used for multiples of 16 so count is not checked.
119 SumSquareError = SumSquareError_SSE2;
122 #if defined(HAS_SUMSQUAREERROR_AVX2)
123 if (TestCpuFlag(kCpuHasAVX2)) {
124 // Note only used for multiples of 32 so count is not checked.
125 SumSquareError = SumSquareError_AVX2;
129 #pragma omp parallel for reduction(+: sse)
131 for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
132 sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
134 src_a += count & ~(kBlockSize - 1);
135 src_b += count & ~(kBlockSize - 1);
137 sse += SumSquareError(src_a, src_b, remainder);
141 remainder = count & 31;
143 sse += SumSquareError_C(src_a, src_b, remainder);
149 uint64 ComputeSumSquareErrorPlane(const uint8* src_a, int stride_a,
150 const uint8* src_b, int stride_b,
151 int width, int height) {
155 if (stride_a == width &&
159 stride_a = stride_b = 0;
161 for (h = 0; h < height; ++h) {
162 sse += ComputeSumSquareError(src_a, src_b, width);
170 double SumSquareErrorToPsnr(uint64 sse, uint64 count) {
173 double mse = (double)(count) / (double)(sse);
174 psnr = 10.0 * log10(255.0 * 255.0 * mse);
176 psnr = kMaxPsnr; // Limit to prevent divide by 0
186 double CalcFramePsnr(const uint8* src_a, int stride_a,
187 const uint8* src_b, int stride_b,
188 int width, int height) {
189 const uint64 samples = width * height;
190 const uint64 sse = ComputeSumSquareErrorPlane(src_a, stride_a,
193 return SumSquareErrorToPsnr(sse, samples);
197 double I420Psnr(const uint8* src_y_a, int stride_y_a,
198 const uint8* src_u_a, int stride_u_a,
199 const uint8* src_v_a, int stride_v_a,
200 const uint8* src_y_b, int stride_y_b,
201 const uint8* src_u_b, int stride_u_b,
202 const uint8* src_v_b, int stride_v_b,
203 int width, int height) {
204 const uint64 sse_y = ComputeSumSquareErrorPlane(src_y_a, stride_y_a,
207 const int width_uv = (width + 1) >> 1;
208 const int height_uv = (height + 1) >> 1;
209 const uint64 sse_u = ComputeSumSquareErrorPlane(src_u_a, stride_u_a,
211 width_uv, height_uv);
212 const uint64 sse_v = ComputeSumSquareErrorPlane(src_v_a, stride_v_a,
214 width_uv, height_uv);
215 const uint64 samples = width * height + 2 * (width_uv * height_uv);
216 const uint64 sse = sse_y + sse_u + sse_v;
217 return SumSquareErrorToPsnr(sse, samples);
220 static const int64 cc1 = 26634; // (64^2*(.01*255)^2
221 static const int64 cc2 = 239708; // (64^2*(.03*255)^2
223 static double Ssim8x8_C(const uint8* src_a, int stride_a,
224 const uint8* src_b, int stride_b) {
232 for (i = 0; i < 8; ++i) {
234 for (j = 0; j < 8; ++j) {
237 sum_sq_a += src_a[j] * src_a[j];
238 sum_sq_b += src_b[j] * src_b[j];
239 sum_axb += src_a[j] * src_b[j];
247 const int64 count = 64;
248 // scale the constants by number of pixels
249 const int64 c1 = (cc1 * count * count) >> 12;
250 const int64 c2 = (cc2 * count * count) >> 12;
252 const int64 sum_a_x_sum_b = sum_a * sum_b;
254 const int64 ssim_n = (2 * sum_a_x_sum_b + c1) *
255 (2 * count * sum_axb - 2 * sum_a_x_sum_b + c2);
257 const int64 sum_a_sq = sum_a*sum_a;
258 const int64 sum_b_sq = sum_b*sum_b;
260 const int64 ssim_d = (sum_a_sq + sum_b_sq + c1) *
261 (count * sum_sq_a - sum_a_sq +
262 count * sum_sq_b - sum_b_sq + c2);
267 return ssim_n * 1.0 / ssim_d;
// We use an 8x8 moving window whose starting locations lie on a 4x4 pixel
// grid.  This arrangement lets the windows overlap block boundaries to
// penalize blocking artifacts.
275 double CalcFrameSsim(const uint8* src_a, int stride_a,
276 const uint8* src_b, int stride_b,
277 int width, int height) {
279 double ssim_total = 0;
280 double (*Ssim8x8)(const uint8* src_a, int stride_a,
281 const uint8* src_b, int stride_b) = Ssim8x8_C;
283 // sample point start with each 4x4 location
285 for (i = 0; i < height - 8; i += 4) {
287 for (j = 0; j < width - 8; j += 4) {
288 ssim_total += Ssim8x8(src_a + j, stride_a, src_b + j, stride_b);
292 src_a += stride_a * 4;
293 src_b += stride_b * 4;
296 ssim_total /= samples;
301 double I420Ssim(const uint8* src_y_a, int stride_y_a,
302 const uint8* src_u_a, int stride_u_a,
303 const uint8* src_v_a, int stride_v_a,
304 const uint8* src_y_b, int stride_y_b,
305 const uint8* src_u_b, int stride_u_b,
306 const uint8* src_v_b, int stride_v_b,
307 int width, int height) {
308 const double ssim_y = CalcFrameSsim(src_y_a, stride_y_a,
309 src_y_b, stride_y_b, width, height);
310 const int width_uv = (width + 1) >> 1;
311 const int height_uv = (height + 1) >> 1;
312 const double ssim_u = CalcFrameSsim(src_u_a, stride_u_a,
314 width_uv, height_uv);
315 const double ssim_v = CalcFrameSsim(src_v_a, stride_v_a,
317 width_uv, height_uv);
318 return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
323 } // namespace libyuv