path: root/drivers/webp/dsp
author    Juan Linietsky  2014-02-09 22:10:30 -0300
committer Juan Linietsky  2014-02-09 22:10:30 -0300
commit    0b806ee0fc9097fa7bda7ac0109191c9c5e0a1ac (patch)
tree      276c4d099e178eb67fbd14f61d77b05e3808e9e3 /drivers/webp/dsp
parent    0e49da1687bc8192ed210947da52c9e5c5f301bb (diff)
download  godot-0b806ee.tar.gz
          godot-0b806ee.tar.zst
          godot-0b806ee.zip
GODOT IS OPEN SOURCE
Diffstat (limited to '')
-rw-r--r--  drivers/webp/dsp/cpu.c               80
-rw-r--r--  drivers/webp/dsp/dec.c              756
-rw-r--r--  drivers/webp/dsp/dec_neon.c         433
-rw-r--r--  drivers/webp/dsp/dec_sse2.c         956
-rw-r--r--  drivers/webp/dsp/dsp.h              224
-rw-r--r--  drivers/webp/dsp/enc.c              753
-rw-r--r--  drivers/webp/dsp/enc_neon.c         632
-rw-r--r--  drivers/webp/dsp/enc_sse2.c         957
-rw-r--r--  drivers/webp/dsp/lossless.c        1532
-rw-r--r--  drivers/webp/dsp/lossless.h         220
-rw-r--r--  drivers/webp/dsp/upsampling.c       366
-rw-r--r--  drivers/webp/dsp/upsampling_neon.c  265
-rw-r--r--  drivers/webp/dsp/upsampling_sse2.c  218
-rw-r--r--  drivers/webp/dsp/yuv.c              207
-rw-r--r--  drivers/webp/dsp/yuv.h              317
15 files changed, 7916 insertions, 0 deletions
diff --git a/drivers/webp/dsp/cpu.c b/drivers/webp/dsp/cpu.c
new file mode 100644
index 000000000..7a1f417a5
--- /dev/null
+++ b/drivers/webp/dsp/cpu.c
@@ -0,0 +1,80 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// CPU detection
+//
+// Author: Christian Duvivier (cduvivier@google.com)
+
+#include "./dsp.h"
+
+#if defined(__ANDROID__)
+#include <cpu-features.h>
+#endif
+
+//------------------------------------------------------------------------------
+// SSE2 detection.
+//
+
+// apple/darwin gcc-4.0.1 defines __PIC__, but not __pic__ with -fPIC.
+#if (defined(__pic__) || defined(__PIC__)) && defined(__i386__)
+static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
+ __asm__ volatile (
+ "mov %%ebx, %%edi\n"
+ "cpuid\n"
+ "xchg %%edi, %%ebx\n"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type));
+}
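+// Note: under PIC on i386, %ebx holds the GOT pointer, so it must not be
+// clobbered; the mov/xchg pair above saves and restores it around cpuid.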
+#elif defined(__i386__) || defined(__x86_64__)
+static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
+ __asm__ volatile (
+ "cpuid\n"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type));
+}
+#elif defined(WEBP_MSC_SSE2)
+#define GetCPUInfo __cpuid
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(WEBP_MSC_SSE2)
+static int x86CPUInfo(CPUFeature feature) {
+ int cpu_info[4];
+ GetCPUInfo(cpu_info, 1);
+ if (feature == kSSE2) {
+ return 0 != (cpu_info[3] & 0x04000000);
+ }
+ if (feature == kSSE3) {
+ return 0 != (cpu_info[2] & 0x00000001);
+ }
+ return 0;
+}
+VP8CPUInfo VP8GetCPUInfo = x86CPUInfo;
+#elif defined(WEBP_ANDROID_NEON)
+static int AndroidCPUInfo(CPUFeature feature) {
+ const AndroidCpuFamily cpu_family = android_getCpuFamily();
+ const uint64_t cpu_features = android_getCpuFeatures();
+ if (feature == kNEON) {
+ return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
+ 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
+ }
+ return 0;
+}
+VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
+#elif defined(__ARM_NEON__)
+// Define a dummy function to enable turning off NEON at runtime by setting
+// VP8GetCPUInfo = NULL.
+static int armCPUInfo(CPUFeature feature) {
+ (void)feature;
+ return 1;
+}
+VP8CPUInfo VP8GetCPUInfo = armCPUInfo;
+#else
+VP8CPUInfo VP8GetCPUInfo = NULL;
+#endif
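+
+// Example runtime check (sketch, hypothetical call site): callers should test
+// the pointer before use, since it is NULL when no detection is available:
+//   if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSSE2)) { /* use SSE2 path */ }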
+
diff --git a/drivers/webp/dsp/dec.c b/drivers/webp/dsp/dec.c
new file mode 100644
index 000000000..8b246fad0
--- /dev/null
+++ b/drivers/webp/dsp/dec.c
@@ -0,0 +1,756 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Speed-critical decoding functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "./dsp.h"
+#include "../dec/vp8i.h"
+
+//------------------------------------------------------------------------------
+// run-time tables (~4k)
+
+static uint8_t abs0[255 + 255 + 1]; // abs(i)
+static uint8_t abs1[255 + 255 + 1]; // abs(i)>>1
+static int8_t sclip1[1020 + 1020 + 1]; // clips [-1020, 1020] to [-128, 127]
+static int8_t sclip2[112 + 112 + 1]; // clips [-112, 112] to [-16, 15]
+static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255]
+
+// We declare this variable 'volatile' to prevent instruction reordering
+// and make sure it's set to true _last_ (so as to be thread-safe)
+static volatile int tables_ok = 0;
+
+static void DspInitTables(void) {
+ if (!tables_ok) {
+ int i;
+ for (i = -255; i <= 255; ++i) {
+ abs0[255 + i] = (i < 0) ? -i : i;
+ abs1[255 + i] = abs0[255 + i] >> 1;
+ }
+ for (i = -1020; i <= 1020; ++i) {
+ sclip1[1020 + i] = (i < -128) ? -128 : (i > 127) ? 127 : i;
+ }
+ for (i = -112; i <= 112; ++i) {
+ sclip2[112 + i] = (i < -16) ? -16 : (i > 15) ? 15 : i;
+ }
+ for (i = -255; i <= 255 + 255; ++i) {
+ clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
+ }
+ tables_ok = 1;
+ }
+}
+
+static WEBP_INLINE uint8_t clip_8b(int v) {
+ return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
+}
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+#define STORE(x, y, v) \
+ dst[x + y * BPS] = clip_8b(dst[x + y * BPS] + ((v) >> 3))
+
+#define STORE2(y, dc, d, c) do { \
+ const int DC = (dc); \
+ STORE(0, y, DC + (d)); \
+ STORE(1, y, DC + (c)); \
+ STORE(2, y, DC - (c)); \
+ STORE(3, y, DC - (d)); \
+} while (0)
+
+static const int kC1 = 20091 + (1 << 16);
+static const int kC2 = 35468;
+#define MUL(a, b) (((a) * (b)) >> 16)
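+
+// Note (illustrative): folding the (1 << 16) bias into kC1 makes
+//   MUL(x, kC1) == ((x * 20091) >> 16) + x
+// hold exactly (given arithmetic right shift), since x << 16 is an integer
+// multiple of the divisor; the "+ x" term is absorbed into the constant.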
+
+static void TransformOne(const int16_t* in, uint8_t* dst) {
+ int C[4 * 4], *tmp;
+ int i;
+ tmp = C;
+ for (i = 0; i < 4; ++i) { // vertical pass
+ const int a = in[0] + in[8]; // [-4096, 4094]
+ const int b = in[0] - in[8]; // [-4095, 4095]
+ const int c = MUL(in[4], kC2) - MUL(in[12], kC1); // [-3783, 3783]
+ const int d = MUL(in[4], kC1) + MUL(in[12], kC2); // [-3785, 3781]
+ tmp[0] = a + d; // [-7881, 7875]
+ tmp[1] = b + c; // [-7878, 7878]
+ tmp[2] = b - c; // [-7878, 7878]
+ tmp[3] = a - d; // [-7877, 7879]
+ tmp += 4;
+ in++;
+ }
+ // Each pass expands the dynamic range by ~3.85 (upper bound).
+ // The exact value is (2. + (kC1 + kC2) / 65536).
+ // After the second pass, maximum interval is [-3794, 3794], assuming
+ // an input in [-2048, 2047] interval. We then need to add a dst value
+ // in the [0, 255] range.
+ // In the worst case scenario, the input to clip_8b() can be as large as
+ // [-60713, 60968].
+ tmp = C;
+ for (i = 0; i < 4; ++i) { // horizontal pass
+ const int dc = tmp[0] + 4;
+ const int a = dc + tmp[8];
+ const int b = dc - tmp[8];
+ const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1);
+ const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2);
+ STORE(0, 0, a + d);
+ STORE(1, 0, b + c);
+ STORE(2, 0, b - c);
+ STORE(3, 0, a - d);
+ tmp++;
+ dst += BPS;
+ }
+}
+
+// Simplified transform when only in[0], in[1] and in[4] are non-zero
+static void TransformAC3(const int16_t* in, uint8_t* dst) {
+ const int a = in[0] + 4;
+ const int c4 = MUL(in[4], kC2);
+ const int d4 = MUL(in[4], kC1);
+ const int c1 = MUL(in[1], kC2);
+ const int d1 = MUL(in[1], kC1);
+ STORE2(0, a + d4, d1, c1);
+ STORE2(1, a + c4, d1, c1);
+ STORE2(2, a - c4, d1, c1);
+ STORE2(3, a - d4, d1, c1);
+}
+#undef MUL
+#undef STORE2
+
+static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
+ TransformOne(in, dst);
+ if (do_two) {
+ TransformOne(in + 16, dst + 4);
+ }
+}
+
+static void TransformUV(const int16_t* in, uint8_t* dst) {
+ VP8Transform(in + 0 * 16, dst, 1);
+ VP8Transform(in + 2 * 16, dst + 4 * BPS, 1);
+}
+
+static void TransformDC(const int16_t *in, uint8_t* dst) {
+ const int DC = in[0] + 4;
+ int i, j;
+ for (j = 0; j < 4; ++j) {
+ for (i = 0; i < 4; ++i) {
+ STORE(i, j, DC);
+ }
+ }
+}
+
+static void TransformDCUV(const int16_t* in, uint8_t* dst) {
+ if (in[0 * 16]) TransformDC(in + 0 * 16, dst);
+ if (in[1 * 16]) TransformDC(in + 1 * 16, dst + 4);
+ if (in[2 * 16]) TransformDC(in + 2 * 16, dst + 4 * BPS);
+ if (in[3 * 16]) TransformDC(in + 3 * 16, dst + 4 * BPS + 4);
+}
+
+#undef STORE
+
+//------------------------------------------------------------------------------
+// Paragraph 14.3
+
+static void TransformWHT(const int16_t* in, int16_t* out) {
+ int tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i) {
+ const int a0 = in[0 + i] + in[12 + i];
+ const int a1 = in[4 + i] + in[ 8 + i];
+ const int a2 = in[4 + i] - in[ 8 + i];
+ const int a3 = in[0 + i] - in[12 + i];
+ tmp[0 + i] = a0 + a1;
+ tmp[8 + i] = a0 - a1;
+ tmp[4 + i] = a3 + a2;
+ tmp[12 + i] = a3 - a2;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int dc = tmp[0 + i * 4] + 3; // w/ rounder
+ const int a0 = dc + tmp[3 + i * 4];
+ const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
+ const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
+ const int a3 = dc - tmp[3 + i * 4];
+ out[ 0] = (a0 + a1) >> 3;
+ out[16] = (a3 + a2) >> 3;
+ out[32] = (a0 - a1) >> 3;
+ out[48] = (a3 - a2) >> 3;
+ out += 64;
+ }
+}
+
+void (*VP8TransformWHT)(const int16_t* in, int16_t* out) = TransformWHT;
+
+//------------------------------------------------------------------------------
+// Intra predictions
+
+#define DST(x, y) dst[(x) + (y) * BPS]
+
+static WEBP_INLINE void TrueMotion(uint8_t *dst, int size) {
+ const uint8_t* top = dst - BPS;
+ const uint8_t* const clip0 = clip1 + 255 - top[-1];
+ int y;
+ for (y = 0; y < size; ++y) {
+ const uint8_t* const clip = clip0 + dst[-1];
+ int x;
+ for (x = 0; x < size; ++x) {
+ dst[x] = clip[top[x]];
+ }
+ dst += BPS;
+ }
+}
+static void TM4(uint8_t *dst) { TrueMotion(dst, 4); }
+static void TM8uv(uint8_t *dst) { TrueMotion(dst, 8); }
+static void TM16(uint8_t *dst) { TrueMotion(dst, 16); }
+
+//------------------------------------------------------------------------------
+// 16x16
+
+static void VE16(uint8_t *dst) { // vertical
+ int j;
+ for (j = 0; j < 16; ++j) {
+ memcpy(dst + j * BPS, dst - BPS, 16);
+ }
+}
+
+static void HE16(uint8_t *dst) { // horizontal
+ int j;
+ for (j = 16; j > 0; --j) {
+ memset(dst, dst[-1], 16);
+ dst += BPS;
+ }
+}
+
+static WEBP_INLINE void Put16(int v, uint8_t* dst) {
+ int j;
+ for (j = 0; j < 16; ++j) {
+ memset(dst + j * BPS, v, 16);
+ }
+}
+
+static void DC16(uint8_t *dst) { // DC
+ int DC = 16;
+ int j;
+ for (j = 0; j < 16; ++j) {
+ DC += dst[-1 + j * BPS] + dst[j - BPS];
+ }
+ Put16(DC >> 5, dst);
+}
+
+static void DC16NoTop(uint8_t *dst) { // DC with top samples not available
+ int DC = 8;
+ int j;
+ for (j = 0; j < 16; ++j) {
+ DC += dst[-1 + j * BPS];
+ }
+ Put16(DC >> 4, dst);
+}
+
+static void DC16NoLeft(uint8_t *dst) { // DC with left samples not available
+ int DC = 8;
+ int i;
+ for (i = 0; i < 16; ++i) {
+ DC += dst[i - BPS];
+ }
+ Put16(DC >> 4, dst);
+}
+
+static void DC16NoTopLeft(uint8_t *dst) { // DC with no top and left samples
+ Put16(0x80, dst);
+}
+
+//------------------------------------------------------------------------------
+// 4x4
+
+#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+#define AVG2(a, b) (((a) + (b) + 1) >> 1)
+
+static void VE4(uint8_t *dst) { // vertical
+ const uint8_t* top = dst - BPS;
+ const uint8_t vals[4] = {
+ AVG3(top[-1], top[0], top[1]),
+ AVG3(top[ 0], top[1], top[2]),
+ AVG3(top[ 1], top[2], top[3]),
+ AVG3(top[ 2], top[3], top[4])
+ };
+ int i;
+ for (i = 0; i < 4; ++i) {
+ memcpy(dst + i * BPS, vals, sizeof(vals));
+ }
+}
+
+static void HE4(uint8_t *dst) { // horizontal
+ const int A = dst[-1 - BPS];
+ const int B = dst[-1];
+ const int C = dst[-1 + BPS];
+ const int D = dst[-1 + 2 * BPS];
+ const int E = dst[-1 + 3 * BPS];
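+ // Multiplying the 8-bit average by 0x01010101U replicates it across the
+ // four pixels of the row, so each row is filled with one 32-bit store.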
+ *(uint32_t*)(dst + 0 * BPS) = 0x01010101U * AVG3(A, B, C);
+ *(uint32_t*)(dst + 1 * BPS) = 0x01010101U * AVG3(B, C, D);
+ *(uint32_t*)(dst + 2 * BPS) = 0x01010101U * AVG3(C, D, E);
+ *(uint32_t*)(dst + 3 * BPS) = 0x01010101U * AVG3(D, E, E);
+}
+
+static void DC4(uint8_t *dst) { // DC
+ uint32_t dc = 4;
+ int i;
+ for (i = 0; i < 4; ++i) dc += dst[i - BPS] + dst[-1 + i * BPS];
+ dc >>= 3;
+ for (i = 0; i < 4; ++i) memset(dst + i * BPS, dc, 4);
+}
+
+static void RD4(uint8_t *dst) { // Down-right
+ const int I = dst[-1 + 0 * BPS];
+ const int J = dst[-1 + 1 * BPS];
+ const int K = dst[-1 + 2 * BPS];
+ const int L = dst[-1 + 3 * BPS];
+ const int X = dst[-1 - BPS];
+ const int A = dst[0 - BPS];
+ const int B = dst[1 - BPS];
+ const int C = dst[2 - BPS];
+ const int D = dst[3 - BPS];
+ DST(0, 3) = AVG3(J, K, L);
+ DST(0, 2) = DST(1, 3) = AVG3(I, J, K);
+ DST(0, 1) = DST(1, 2) = DST(2, 3) = AVG3(X, I, J);
+ DST(0, 0) = DST(1, 1) = DST(2, 2) = DST(3, 3) = AVG3(A, X, I);
+ DST(1, 0) = DST(2, 1) = DST(3, 2) = AVG3(B, A, X);
+ DST(2, 0) = DST(3, 1) = AVG3(C, B, A);
+ DST(3, 0) = AVG3(D, C, B);
+}
+
+static void LD4(uint8_t *dst) { // Down-Left
+ const int A = dst[0 - BPS];
+ const int B = dst[1 - BPS];
+ const int C = dst[2 - BPS];
+ const int D = dst[3 - BPS];
+ const int E = dst[4 - BPS];
+ const int F = dst[5 - BPS];
+ const int G = dst[6 - BPS];
+ const int H = dst[7 - BPS];
+ DST(0, 0) = AVG3(A, B, C);
+ DST(1, 0) = DST(0, 1) = AVG3(B, C, D);
+ DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E);
+ DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F);
+ DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
+ DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
+ DST(3, 3) = AVG3(G, H, H);
+}
+
+static void VR4(uint8_t *dst) { // Vertical-Right
+ const int I = dst[-1 + 0 * BPS];
+ const int J = dst[-1 + 1 * BPS];
+ const int K = dst[-1 + 2 * BPS];
+ const int X = dst[-1 - BPS];
+ const int A = dst[0 - BPS];
+ const int B = dst[1 - BPS];
+ const int C = dst[2 - BPS];
+ const int D = dst[3 - BPS];
+ DST(0, 0) = DST(1, 2) = AVG2(X, A);
+ DST(1, 0) = DST(2, 2) = AVG2(A, B);
+ DST(2, 0) = DST(3, 2) = AVG2(B, C);
+ DST(3, 0) = AVG2(C, D);
+
+ DST(0, 3) = AVG3(K, J, I);
+ DST(0, 2) = AVG3(J, I, X);
+ DST(0, 1) = DST(1, 3) = AVG3(I, X, A);
+ DST(1, 1) = DST(2, 3) = AVG3(X, A, B);
+ DST(2, 1) = DST(3, 3) = AVG3(A, B, C);
+ DST(3, 1) = AVG3(B, C, D);
+}
+
+static void VL4(uint8_t *dst) { // Vertical-Left
+ const int A = dst[0 - BPS];
+ const int B = dst[1 - BPS];
+ const int C = dst[2 - BPS];
+ const int D = dst[3 - BPS];
+ const int E = dst[4 - BPS];
+ const int F = dst[5 - BPS];
+ const int G = dst[6 - BPS];
+ const int H = dst[7 - BPS];
+ DST(0, 0) = AVG2(A, B);
+ DST(1, 0) = DST(0, 2) = AVG2(B, C);
+ DST(2, 0) = DST(1, 2) = AVG2(C, D);
+ DST(3, 0) = DST(2, 2) = AVG2(D, E);
+
+ DST(0, 1) = AVG3(A, B, C);
+ DST(1, 1) = DST(0, 3) = AVG3(B, C, D);
+ DST(2, 1) = DST(1, 3) = AVG3(C, D, E);
+ DST(3, 1) = DST(2, 3) = AVG3(D, E, F);
+ DST(3, 2) = AVG3(E, F, G);
+ DST(3, 3) = AVG3(F, G, H);
+}
+
+static void HU4(uint8_t *dst) { // Horizontal-Up
+ const int I = dst[-1 + 0 * BPS];
+ const int J = dst[-1 + 1 * BPS];
+ const int K = dst[-1 + 2 * BPS];
+ const int L = dst[-1 + 3 * BPS];
+ DST(0, 0) = AVG2(I, J);
+ DST(2, 0) = DST(0, 1) = AVG2(J, K);
+ DST(2, 1) = DST(0, 2) = AVG2(K, L);
+ DST(1, 0) = AVG3(I, J, K);
+ DST(3, 0) = DST(1, 1) = AVG3(J, K, L);
+ DST(3, 1) = DST(1, 2) = AVG3(K, L, L);
+ DST(3, 2) = DST(2, 2) =
+ DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
+}
+
+static void HD4(uint8_t *dst) { // Horizontal-Down
+ const int I = dst[-1 + 0 * BPS];
+ const int J = dst[-1 + 1 * BPS];
+ const int K = dst[-1 + 2 * BPS];
+ const int L = dst[-1 + 3 * BPS];
+ const int X = dst[-1 - BPS];
+ const int A = dst[0 - BPS];
+ const int B = dst[1 - BPS];
+ const int C = dst[2 - BPS];
+
+ DST(0, 0) = DST(2, 1) = AVG2(I, X);
+ DST(0, 1) = DST(2, 2) = AVG2(J, I);
+ DST(0, 2) = DST(2, 3) = AVG2(K, J);
+ DST(0, 3) = AVG2(L, K);
+
+ DST(3, 0) = AVG3(A, B, C);
+ DST(2, 0) = AVG3(X, A, B);
+ DST(1, 0) = DST(3, 1) = AVG3(I, X, A);
+ DST(1, 1) = DST(3, 2) = AVG3(J, I, X);
+ DST(1, 2) = DST(3, 3) = AVG3(K, J, I);
+ DST(1, 3) = AVG3(L, K, J);
+}
+
+#undef DST
+#undef AVG3
+#undef AVG2
+
+//------------------------------------------------------------------------------
+// Chroma
+
+static void VE8uv(uint8_t *dst) { // vertical
+ int j;
+ for (j = 0; j < 8; ++j) {
+ memcpy(dst + j * BPS, dst - BPS, 8);
+ }
+}
+
+static void HE8uv(uint8_t *dst) { // horizontal
+ int j;
+ for (j = 0; j < 8; ++j) {
+ memset(dst, dst[-1], 8);
+ dst += BPS;
+ }
+}
+
+// helper for chroma-DC predictions
+static WEBP_INLINE void Put8x8uv(uint8_t value, uint8_t* dst) {
+ int j;
+#ifndef WEBP_REFERENCE_IMPLEMENTATION
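+ // Multiplying by 0x0101010101010101 replicates 'value' into all eight
+ // bytes, so each row is filled with a single 64-bit store.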
+ const uint64_t v = (uint64_t)value * 0x0101010101010101ULL;
+ for (j = 0; j < 8; ++j) {
+ *(uint64_t*)(dst + j * BPS) = v;
+ }
+#else
+ for (j = 0; j < 8; ++j) memset(dst + j * BPS, value, 8);
+#endif
+}
+
+static void DC8uv(uint8_t *dst) { // DC
+ int dc0 = 8;
+ int i;
+ for (i = 0; i < 8; ++i) {
+ dc0 += dst[i - BPS] + dst[-1 + i * BPS];
+ }
+ Put8x8uv(dc0 >> 4, dst);
+}
+
+static void DC8uvNoLeft(uint8_t *dst) { // DC with no left samples
+ int dc0 = 4;
+ int i;
+ for (i = 0; i < 8; ++i) {
+ dc0 += dst[i - BPS];
+ }
+ Put8x8uv(dc0 >> 3, dst);
+}
+
+static void DC8uvNoTop(uint8_t *dst) { // DC with no top samples
+ int dc0 = 4;
+ int i;
+ for (i = 0; i < 8; ++i) {
+ dc0 += dst[-1 + i * BPS];
+ }
+ Put8x8uv(dc0 >> 3, dst);
+}
+
+static void DC8uvNoTopLeft(uint8_t *dst) { // DC with nothing
+ Put8x8uv(0x80, dst);
+}
+
+//------------------------------------------------------------------------------
+// default C implementations
+
+const VP8PredFunc VP8PredLuma4[NUM_BMODES] = {
+ DC4, TM4, VE4, HE4, RD4, VR4, LD4, VL4, HD4, HU4
+};
+
+const VP8PredFunc VP8PredLuma16[NUM_B_DC_MODES] = {
+ DC16, TM16, VE16, HE16,
+ DC16NoTop, DC16NoLeft, DC16NoTopLeft
+};
+
+const VP8PredFunc VP8PredChroma8[NUM_B_DC_MODES] = {
+ DC8uv, TM8uv, VE8uv, HE8uv,
+ DC8uvNoTop, DC8uvNoLeft, DC8uvNoTopLeft
+};
+
+//------------------------------------------------------------------------------
+// Edge filtering functions
+
+// 4 pixels in, 2 pixels out
+static WEBP_INLINE void do_filter2(uint8_t* p, int step) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ const int a = 3 * (q0 - p0) + sclip1[1020 + p1 - q1];
+ const int a1 = sclip2[112 + ((a + 4) >> 3)];
+ const int a2 = sclip2[112 + ((a + 3) >> 3)];
+ p[-step] = clip1[255 + p0 + a2];
+ p[ 0] = clip1[255 + q0 - a1];
+}
+
+// 4 pixels in, 4 pixels out
+static WEBP_INLINE void do_filter4(uint8_t* p, int step) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ const int a = 3 * (q0 - p0);
+ const int a1 = sclip2[112 + ((a + 4) >> 3)];
+ const int a2 = sclip2[112 + ((a + 3) >> 3)];
+ const int a3 = (a1 + 1) >> 1;
+ p[-2*step] = clip1[255 + p1 + a3];
+ p[- step] = clip1[255 + p0 + a2];
+ p[ 0] = clip1[255 + q0 - a1];
+ p[ step] = clip1[255 + q1 - a3];
+}
+
+// 6 pixels in, 6 pixels out
+static WEBP_INLINE void do_filter6(uint8_t* p, int step) {
+ const int p2 = p[-3*step], p1 = p[-2*step], p0 = p[-step];
+ const int q0 = p[0], q1 = p[step], q2 = p[2*step];
+ const int a = sclip1[1020 + 3 * (q0 - p0) + sclip1[1020 + p1 - q1]];
+ const int a1 = (27 * a + 63) >> 7; // eq. to ((3 * a + 7) * 9) >> 7
+ const int a2 = (18 * a + 63) >> 7; // eq. to ((2 * a + 7) * 9) >> 7
+ const int a3 = (9 * a + 63) >> 7; // eq. to ((1 * a + 7) * 9) >> 7
+ p[-3*step] = clip1[255 + p2 + a3];
+ p[-2*step] = clip1[255 + p1 + a2];
+ p[- step] = clip1[255 + p0 + a1];
+ p[ 0] = clip1[255 + q0 - a1];
+ p[ step] = clip1[255 + q1 - a2];
+ p[ 2*step] = clip1[255 + q2 - a3];
+}
+
+static WEBP_INLINE int hev(const uint8_t* p, int step, int thresh) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ return (abs0[255 + p1 - p0] > thresh) || (abs0[255 + q1 - q0] > thresh);
+}
+
+static WEBP_INLINE int needs_filter(const uint8_t* p, int step, int thresh) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ return (2 * abs0[255 + p0 - q0] + abs1[255 + p1 - q1]) <= thresh;
+}
+
+static WEBP_INLINE int needs_filter2(const uint8_t* p,
+ int step, int t, int it) {
+ const int p3 = p[-4*step], p2 = p[-3*step], p1 = p[-2*step], p0 = p[-step];
+ const int q0 = p[0], q1 = p[step], q2 = p[2*step], q3 = p[3*step];
+ if ((2 * abs0[255 + p0 - q0] + abs1[255 + p1 - q1]) > t)
+ return 0;
+ return abs0[255 + p3 - p2] <= it && abs0[255 + p2 - p1] <= it &&
+ abs0[255 + p1 - p0] <= it && abs0[255 + q3 - q2] <= it &&
+ abs0[255 + q2 - q1] <= it && abs0[255 + q1 - q0] <= it;
+}
+
+//------------------------------------------------------------------------------
+// Simple In-loop filtering (Paragraph 15.2)
+
+static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ if (needs_filter(p + i, stride, thresh)) {
+ do_filter2(p + i, stride);
+ }
+ }
+}
+
+static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ if (needs_filter(p + i * stride, 1, thresh)) {
+ do_filter2(p + i * stride, 1);
+ }
+ }
+}
+
+static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4 * stride;
+ SimpleVFilter16(p, stride, thresh);
+ }
+}
+
+static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4;
+ SimpleHFilter16(p, stride, thresh);
+ }
+}
+
+//------------------------------------------------------------------------------
+// Complex In-loop filtering (Paragraph 15.3)
+
+static WEBP_INLINE void FilterLoop26(uint8_t* p,
+ int hstride, int vstride, int size,
+ int thresh, int ithresh, int hev_thresh) {
+ while (size-- > 0) {
+ if (needs_filter2(p, hstride, thresh, ithresh)) {
+ if (hev(p, hstride, hev_thresh)) {
+ do_filter2(p, hstride);
+ } else {
+ do_filter6(p, hstride);
+ }
+ }
+ p += vstride;
+ }
+}
+
+static WEBP_INLINE void FilterLoop24(uint8_t* p,
+ int hstride, int vstride, int size,
+ int thresh, int ithresh, int hev_thresh) {
+ while (size-- > 0) {
+ if (needs_filter2(p, hstride, thresh, ithresh)) {
+ if (hev(p, hstride, hev_thresh)) {
+ do_filter2(p, hstride);
+ } else {
+ do_filter4(p, hstride);
+ }
+ }
+ p += vstride;
+ }
+}
+
+// on macroblock edges
+static void VFilter16(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop26(p, stride, 1, 16, thresh, ithresh, hev_thresh);
+}
+
+static void HFilter16(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop26(p, 1, stride, 16, thresh, ithresh, hev_thresh);
+}
+
+// on three inner edges
+static void VFilter16i(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4 * stride;
+ FilterLoop24(p, stride, 1, 16, thresh, ithresh, hev_thresh);
+ }
+}
+
+static void HFilter16i(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4;
+ FilterLoop24(p, 1, stride, 16, thresh, ithresh, hev_thresh);
+ }
+}
+
+// 8-pixels wide variant, for chroma filtering
+static void VFilter8(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh);
+ FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh);
+}
+
+static void HFilter8(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh);
+ FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh);
+}
+
+static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
+ FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
+}
+
+static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
+ FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
+}
+
+//------------------------------------------------------------------------------
+
+VP8DecIdct2 VP8Transform;
+VP8DecIdct VP8TransformAC3;
+VP8DecIdct VP8TransformUV;
+VP8DecIdct VP8TransformDC;
+VP8DecIdct VP8TransformDCUV;
+
+VP8LumaFilterFunc VP8VFilter16;
+VP8LumaFilterFunc VP8HFilter16;
+VP8ChromaFilterFunc VP8VFilter8;
+VP8ChromaFilterFunc VP8HFilter8;
+VP8LumaFilterFunc VP8VFilter16i;
+VP8LumaFilterFunc VP8HFilter16i;
+VP8ChromaFilterFunc VP8VFilter8i;
+VP8ChromaFilterFunc VP8HFilter8i;
+VP8SimpleFilterFunc VP8SimpleVFilter16;
+VP8SimpleFilterFunc VP8SimpleHFilter16;
+VP8SimpleFilterFunc VP8SimpleVFilter16i;
+VP8SimpleFilterFunc VP8SimpleHFilter16i;
+
+extern void VP8DspInitSSE2(void);
+extern void VP8DspInitNEON(void);
+
+void VP8DspInit(void) {
+ DspInitTables();
+
+ VP8Transform = TransformTwo;
+ VP8TransformUV = TransformUV;
+ VP8TransformDC = TransformDC;
+ VP8TransformDCUV = TransformDCUV;
+ VP8TransformAC3 = TransformAC3;
+
+ VP8VFilter16 = VFilter16;
+ VP8HFilter16 = HFilter16;
+ VP8VFilter8 = VFilter8;
+ VP8HFilter8 = HFilter8;
+ VP8VFilter16i = VFilter16i;
+ VP8HFilter16i = HFilter16i;
+ VP8VFilter8i = VFilter8i;
+ VP8HFilter8i = HFilter8i;
+ VP8SimpleVFilter16 = SimpleVFilter16;
+ VP8SimpleHFilter16 = SimpleHFilter16;
+ VP8SimpleVFilter16i = SimpleVFilter16i;
+ VP8SimpleHFilter16i = SimpleHFilter16i;
+
+ // If set, use VP8GetCPUInfo() to overwrite some pointers with faster
+ // versions.
+ if (VP8GetCPUInfo) {
+#if defined(WEBP_USE_SSE2)
+ if (VP8GetCPUInfo(kSSE2)) {
+ VP8DspInitSSE2();
+ }
+#elif defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ VP8DspInitNEON();
+ }
+#endif
+ }
+}
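+
+// Typical usage (sketch; the caller's buffer names are hypothetical):
+//   VP8DspInit();                  // once, before any decoding
+//   VP8Transform(coeffs, dst, 1);  // now dispatches to the fastest variant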
+
diff --git a/drivers/webp/dsp/dec_neon.c b/drivers/webp/dsp/dec_neon.c
new file mode 100644
index 000000000..9c3d8cc01
--- /dev/null
+++ b/drivers/webp/dsp/dec_neon.c
@@ -0,0 +1,433 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of dsp functions and loop filtering.
+//
+// Authors: Somnath Banerjee (somnath@google.com)
+// Johann Koenig (johannkoenig@google.com)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include "../dec/vp8i.h"
+
+#define QRegs "q0", "q1", "q2", "q3", \
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+
+#define FLIP_SIGN_BIT2(a, b, s) \
+ "veor " #a "," #a "," #s " \n" \
+ "veor " #b "," #b "," #s " \n" \
+
+#define FLIP_SIGN_BIT4(a, b, c, d, s) \
+ FLIP_SIGN_BIT2(a, b, s) \
+ FLIP_SIGN_BIT2(c, d, s) \
+
+#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \
+ "vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \
+ "vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \
+ "vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \
+ "vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \
+ "vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
+ "vdup.8 q14, " #thresh " \n" \
+ "vcge.u8 " #mask ", q14, q15 \n" /* mask <= thresh */
+
+#define GET_BASE_DELTA(p1, p0, q0, q1, o) \
+ "vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \
+ "vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \
+ "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (p0 - q0) */ \
+ "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (p0 - q0) */ \
+ "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (p0 - q0) */
+
+#define DO_SIMPLE_FILTER(p0, q0, fl) \
+ "vmov.i8 q15, #0x03 \n" \
+ "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \
+ "vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \
+ "vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \
+ \
+ "vmov.i8 q15, #0x04 \n" \
+ "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 4 */ \
+ "vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \
+ "vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */
+
+// Applies filter on 2 pixels (p0 and q0)
+#define DO_FILTER2(p1, p0, q0, q1, thresh) \
+ NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \
+ "vmov.i8 q10, #0x80 \n" /* sign bit */ \
+ FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \
+ GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \
+ "vand q9, q9, q11 \n" /* apply filter mask */ \
+ DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \
+ FLIP_SIGN_BIT2(p0, q0, q10)
+
+// Load/Store vertical edge
+#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \
+ "vld4.8 {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \
+ "vld4.8 {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \
+ "vld4.8 {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \
+ "vld4.8 {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \
+ "vld4.8 {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \
+ "vld4.8 {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \
+ "vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
+ "vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"
+
+#define STORE8x2(c1, c2, p, stride) \
+ "vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[3], " #c2"[3]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[4], " #c2"[4]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[5], " #c2"[5]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[6], " #c2"[6]}," #p "," #stride " \n" \
+ "vst2.8 {" #c1"[7], " #c2"[7]}," #p "," #stride " \n"
+
+//-----------------------------------------------------------------------------
+// Simple In-loop filtering (Paragraph 15.2)
+
+static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) {
+ __asm__ volatile (
+ "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
+
+ "vld1.u8 {q1}, [%[p]], %[stride] \n" // p1
+ "vld1.u8 {q2}, [%[p]], %[stride] \n" // p0
+ "vld1.u8 {q3}, [%[p]], %[stride] \n" // q0
+ "vld1.u8 {q12}, [%[p]] \n" // q1
+
+ DO_FILTER2(q1, q2, q3, q12, %[thresh])
+
+ "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
+
+ "vst1.u8 {q2}, [%[p]], %[stride] \n" // store op0
+ "vst1.u8 {q3}, [%[p]] \n" // store oq0
+ : [p] "+r"(p)
+ : [stride] "r"(stride), [thresh] "r"(thresh)
+ : "memory", QRegs
+ );
+}
+
+static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) {
+ __asm__ volatile (
+ "sub r4, %[p], #2 \n" // base1 = p - 2
+ "lsl r6, %[stride], #1 \n" // r6 = 2 * stride
+ "add r5, r4, %[stride] \n" // base2 = base1 + stride
+
+ LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
+ LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
+ "vswp d3, d24 \n" // p1:q1 p0:q3
+ "vswp d5, d26 \n" // q0:q2 q1:q4
+ "vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4
+
+ DO_FILTER2(q1, q2, q12, q13, %[thresh])
+
+ "sub %[p], %[p], #1 \n" // p - 1
+
+ "vswp d5, d24 \n"
+ STORE8x2(d4, d5, [%[p]], %[stride])
+ STORE8x2(d24, d25, [%[p]], %[stride])
+
+ : [p] "+r"(p)
+ : [stride] "r"(stride), [thresh] "r"(thresh)
+ : "memory", "r4", "r5", "r6", QRegs
+ );
+}
+
+static void SimpleVFilter16iNEON(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4 * stride;
+ SimpleVFilter16NEON(p, stride, thresh);
+ }
+}
+
+static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4;
+ SimpleHFilter16NEON(p, stride, thresh);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Inverse transforms (Paragraph 14.4)
+
+static void TransformOne(const int16_t* in, uint8_t* dst) {
+ const int kBPS = BPS;
+ const int16_t constants[] = {20091, 17734, 0, 0};
+ /* kC1, kC2. Padded because vld1.16 loads 8 bytes.
+ * Technically these are unsigned, but vqdmulh is only available for signed
+ * values. vqdmulh returns the high half (effectively >> 16) but also doubles
+ * the value, changing the >> 16 to >> 15 and requiring an additional >> 1.
+ * We use this to our advantage with kC2. The canonical value is 35468.
+ * However, its high bit is set, so treating it as signed would give incorrect
+ * results. We avoid this by down-shifting it by 1 here to clear the highest
+ * bit. Combined with the doubling effect of vqdmulh, we get >> 16.
+ * This cannot be applied to kC1 because its lowest bit is set; down-shifting
+ * that constant would lose precision.
+ */
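+
+ /* Scalar model of the trick above (illustrative only):
+ * vqdmulh(x, k) ~ (2 * x * k) >> 16 (saturating), so
+ * vqdmulh(x, 17734) == (x * 35468) >> 16 exactly (17734 * 2 == 35468),
+ * and (vqdmulh(x, 20091) >> 1) + x ~ (x * (20091 + (1 << 16))) >> 16.
+ */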
+
+ /* libwebp uses a trick to avoid an extra addition that libvpx does.
+ * Instead of:
+ * temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
+ * libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the
+ * same issue with kC1 and vqdmulh that we work around by down-shifting kC2.
+ */
+
+ /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */
+ __asm__ volatile (
+ "vld1.16 {q1, q2}, [%[in]] \n"
+ "vld1.16 {d0}, [%[constants]] \n"
+
+ /* d2: in[0]
+ * d3: in[8]
+ * d4: in[4]
+ * d5: in[12]
+ */
+ "vswp d3, d4 \n"
+
+ /* q8 = {in[4], in[12]} * kC1 * 2 >> 16
+ * q9 = {in[4], in[12]} * kC2 >> 16
+ */
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ /* d22 = a = in[0] + in[8]
+ * d23 = b = in[0] - in[8]
+ */
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ /* The multiplication should be x * kC1 >> 16
+ * However, with vqdmulh we get x * kC1 * 2 >> 16
+ * (multiply, double, return high half)
+ * We avoided this in kC2 by pre-shifting the constant.
+ * q8 = in[4]/[12] * kC1 >> 16
+ */
+ "vshr.s16 q8, q8, #1 \n"
+
+ /* Add {in[4], in[12]} back after the multiplication. This is handled by
+ * adding 1 << 16 to kC1 in the libwebp C code.
+ */
+ "vqadd.s16 q8, q2, q8 \n"
+
+ /* d20 = c = in[4]*kC2 - in[12]*kC1
+ * d21 = d = in[4]*kC1 + in[12]*kC2
+ */
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ /* d2 = tmp[0] = a + d
+ * d3 = tmp[1] = b + c
+ * d4 = tmp[2] = b - c
+ * d5 = tmp[3] = a - d
+ */
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ "vswp d3, d4 \n"
+
+ /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
+ * q9 = {tmp[4], tmp[12]} * kC2 >> 16
+ */
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ /* d22 = a = tmp[0] + tmp[8]
+ * d23 = b = tmp[0] - tmp[8]
+ */
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ /* See the long-winded explanation above. */
+ "vshr.s16 q8, q8, #1 \n"
+ "vqadd.s16 q8, q2, q8 \n"
+
+ /* d20 = c = tmp[4]*kC2 - tmp[12]*kC1
+ * d21 = d = tmp[4]*kC1 + tmp[12]*kC2
+ */
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ /* d2 = tmp[0] = a + d
+ * d3 = tmp[1] = b + c
+ * d4 = tmp[2] = b - c
+ * d5 = tmp[3] = a - d
+ */
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vld1.32 d6[0], [%[dst]], %[kBPS] \n"
+ "vld1.32 d6[1], [%[dst]], %[kBPS] \n"
+ "vld1.32 d7[0], [%[dst]], %[kBPS] \n"
+ "vld1.32 d7[1], [%[dst]], %[kBPS] \n"
+
+ "sub %[dst], %[dst], %[kBPS], lsl #2 \n"
+
+ /* (val) + 4 >> 3 */
+ "vrshr.s16 d2, d2, #3 \n"
+ "vrshr.s16 d3, d3, #3 \n"
+ "vrshr.s16 d4, d4, #3 \n"
+ "vrshr.s16 d5, d5, #3 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ /* Must accumulate before saturating */
+ "vmovl.u8 q8, d6 \n"
+ "vmovl.u8 q9, d7 \n"
+
+ "vqadd.s16 q1, q1, q8 \n"
+ "vqadd.s16 q2, q2, q9 \n"
+
+ "vqmovun.s16 d0, q1 \n"
+ "vqmovun.s16 d1, q2 \n"
+
+ "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[1], [%[dst]] \n"
+
+ : [in] "+r"(in), [dst] "+r"(dst) /* modified registers */
+ : [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */
+ : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" /* clobbered */
+ );
+}
+
+static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
+ TransformOne(in, dst);
+ if (do_two) {
+ TransformOne(in + 16, dst + 4);
+ }
+}
+
+static void TransformDC(const int16_t* in, uint8_t* dst) {
+ const int DC = (in[0] + 4) >> 3;
+ const int kBPS = BPS;
+ __asm__ volatile (
+ "vdup.16 q1, %[DC] \n"
+
+ "vld1.32 d0[0], [%[dst]], %[kBPS] \n"
+ "vld1.32 d1[0], [%[dst]], %[kBPS] \n"
+ "vld1.32 d0[1], [%[dst]], %[kBPS] \n"
+ "vld1.32 d1[1], [%[dst]], %[kBPS] \n"
+
+ "sub %[dst], %[dst], %[kBPS], lsl #2 \n"
+
+ // add DC and convert to s16.
+ "vaddw.u8 q2, q1, d0 \n"
+ "vaddw.u8 q3, q1, d1 \n"
+ // convert back to u8 with saturation
+ "vqmovun.s16 d0, q2 \n"
+ "vqmovun.s16 d1, q3 \n"
+
+ "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[1], [%[dst]] \n"
+ : [in] "+r"(in), [dst] "+r"(dst) /* modified registers */
+ : [kBPS] "r"(kBPS), /* constants */
+ [DC] "r"(DC)
+ : "memory", "q0", "q1", "q2", "q3" /* clobbered */
+ );
+}
+
+static void TransformWHT(const int16_t* in, int16_t* out) {
+ const int kStep = 32; // The store's post-increment operand is a byte count,
+ // so advancing 'out' by 16 int16_t lanes takes 32.
+ __asm__ volatile (
+ // part 1
+ // load data into q0, q1
+ "vld1.16 {q0, q1}, [%[in]] \n"
+
+ "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
+ "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
+ "vsubl.s16 q10, d1, d2 \n" // a2 = in[4] - in[8]
+ "vsubl.s16 q11, d0, d3 \n" // a3 = in[0] - in[12]
+
+ "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
+ "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
+ "vadd.s32 q1, q11, q10 \n" // tmp[4] = a3 + a2
+ "vsub.s32 q3, q11, q10 \n" // tmp[12] = a3 - a2
+
+ // Transpose
+ // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
+ // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
+ "vswp d1, d4 \n" // vtrn.64 q0, q2
+ "vswp d3, d6 \n" // vtrn.64 q1, q3
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q2, q3 \n"
+
+ "vmov.s32 q10, #3 \n" // dc = 3
+ "vadd.s32 q0, q0, q10 \n" // dc = tmp[0] + 3
+ "vadd.s32 q12, q0, q3 \n" // a0 = dc + tmp[3]
+ "vadd.s32 q13, q1, q2 \n" // a1 = tmp[1] + tmp[2]
+ "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
+ "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
+
+ "vadd.s32 q0, q12, q13 \n"
+ "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
+ "vadd.s32 q1, q9, q8 \n"
+ "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
+ "vsub.s32 q2, q12, q13 \n"
+ "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
+ "vsub.s32 q3, q9, q8 \n"
+ "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
+
+ // set the results to output
+ "vst1.16 d0[0], [%[out]], %[kStep] \n"
+ "vst1.16 d1[0], [%[out]], %[kStep] \n"
+ "vst1.16 d2[0], [%[out]], %[kStep] \n"
+ "vst1.16 d3[0], [%[out]], %[kStep] \n"
+ "vst1.16 d0[1], [%[out]], %[kStep] \n"
+ "vst1.16 d1[1], [%[out]], %[kStep] \n"
+ "vst1.16 d2[1], [%[out]], %[kStep] \n"
+ "vst1.16 d3[1], [%[out]], %[kStep] \n"
+ "vst1.16 d0[2], [%[out]], %[kStep] \n"
+ "vst1.16 d1[2], [%[out]], %[kStep] \n"
+ "vst1.16 d2[2], [%[out]], %[kStep] \n"
+ "vst1.16 d3[2], [%[out]], %[kStep] \n"
+ "vst1.16 d0[3], [%[out]], %[kStep] \n"
+ "vst1.16 d1[3], [%[out]], %[kStep] \n"
+ "vst1.16 d2[3], [%[out]], %[kStep] \n"
+ "vst1.16 d3[3], [%[out]], %[kStep] \n"
+
+ : [out] "+r"(out) // modified registers
+ : [in] "r"(in), [kStep] "r"(kStep) // constants
+ : "memory", "q0", "q1", "q2", "q3",
+ "q8", "q9", "q10", "q11", "q12", "q13" // clobbered
+ );
+}
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8DspInitNEON(void);
+
+void VP8DspInitNEON(void) {
+#if defined(WEBP_USE_NEON)
+ VP8Transform = TransformTwo;
+ VP8TransformAC3 = TransformOne; // no special code here
+ VP8TransformDC = TransformDC;
+ VP8TransformWHT = TransformWHT;
+
+ VP8SimpleVFilter16 = SimpleVFilter16NEON;
+ VP8SimpleHFilter16 = SimpleHFilter16NEON;
+ VP8SimpleVFilter16i = SimpleVFilter16iNEON;
+ VP8SimpleHFilter16i = SimpleHFilter16iNEON;
+#endif // WEBP_USE_NEON
+}
+
diff --git a/drivers/webp/dsp/dec_sse2.c b/drivers/webp/dsp/dec_sse2.c
new file mode 100644
index 000000000..150c559f1
--- /dev/null
+++ b/drivers/webp/dsp/dec_sse2.c
@@ -0,0 +1,956 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 version of some decoding functions (idct, loop filtering).
+//
+// Author: somnath@google.com (Somnath Banerjee)
+// cduvivier@google.com (Christian Duvivier)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+// The 3-coeff sparse transform in SSE2 does not appear to be faster than the
+// plain-C one, so it is disabled by default. Uncomment the following to enable:
+// #define USE_TRANSFORM_AC3
+
+#include <emmintrin.h>
+#include "../dec/vp8i.h"
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
+ // This implementation makes use of 16-bit fixed point versions of two
+ // multiply constants:
+ // K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
+ // K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
+ //
+ // To be able to use signed 16-bit integers, we use the following trick to
+ // have constants within range:
+ // - Associated constants are obtained by subtracting the 16-bit fixed point
+ // version of one:
+ // k = K - (1 << 16) => K = k + (1 << 16)
+// K1 = 85627 => k1 = 20091
+ // K2 = 35468 => k2 = -30068
+// - The multiplication of a variable by a constant becomes the sum of the
+ // variable and the multiplication of that variable by the associated
+ // constant:
+ // (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
+ const __m128i k1 = _mm_set1_epi16(20091);
+ const __m128i k2 = _mm_set1_epi16(-30068);
+ __m128i T0, T1, T2, T3;
+
+ // Load and concatenate the transform coefficients (we'll do two transforms
+ // in parallel). In the case of only one transform, the second half of the
+// vectors will just contain random values we'll never use nor store.
+ __m128i in0, in1, in2, in3;
+ {
+ in0 = _mm_loadl_epi64((__m128i*)&in[0]);
+ in1 = _mm_loadl_epi64((__m128i*)&in[4]);
+ in2 = _mm_loadl_epi64((__m128i*)&in[8]);
+ in3 = _mm_loadl_epi64((__m128i*)&in[12]);
+ // a00 a10 a20 a30 x x x x
+ // a01 a11 a21 a31 x x x x
+ // a02 a12 a22 a32 x x x x
+ // a03 a13 a23 a33 x x x x
+ if (do_two) {
+ const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
+ const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
+ const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
+ const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
+ in0 = _mm_unpacklo_epi64(in0, inB0);
+ in1 = _mm_unpacklo_epi64(in1, inB1);
+ in2 = _mm_unpacklo_epi64(in2, inB2);
+ in3 = _mm_unpacklo_epi64(in3, inB3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+ }
+
+ // Vertical pass and subsequent transpose.
+ {
+ // First pass, c and d calculations are longer because of the "trick"
+ // multiplications.
+ const __m128i a = _mm_add_epi16(in0, in2);
+ const __m128i b = _mm_sub_epi16(in0, in2);
+ // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
+ const __m128i c1 = _mm_mulhi_epi16(in1, k2);
+ const __m128i c2 = _mm_mulhi_epi16(in3, k1);
+ const __m128i c3 = _mm_sub_epi16(in1, in3);
+ const __m128i c4 = _mm_sub_epi16(c1, c2);
+ const __m128i c = _mm_add_epi16(c3, c4);
+ // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
+ const __m128i d1 = _mm_mulhi_epi16(in1, k1);
+ const __m128i d2 = _mm_mulhi_epi16(in3, k2);
+ const __m128i d3 = _mm_add_epi16(in1, in3);
+ const __m128i d4 = _mm_add_epi16(d1, d2);
+ const __m128i d = _mm_add_epi16(d3, d4);
+
+ // Second pass.
+ const __m128i tmp0 = _mm_add_epi16(a, d);
+ const __m128i tmp1 = _mm_add_epi16(b, c);
+ const __m128i tmp2 = _mm_sub_epi16(b, c);
+ const __m128i tmp3 = _mm_sub_epi16(a, d);
+
+ // Transpose the two 4x4.
+ // a00 a01 a02 a03 b00 b01 b02 b03
+ // a10 a11 a12 a13 b10 b11 b12 b13
+ // a20 a21 a22 a23 b20 b21 b22 b23
+ // a30 a31 a32 a33 b30 b31 b32 b33
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
+ const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
+ const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
+ // a00 a10 a01 a11 a02 a12 a03 a13
+ // a20 a30 a21 a31 a22 a32 a23 a33
+ // b00 b10 b01 b11 b02 b12 b03 b13
+ // b20 b30 b21 b31 b22 b32 b23 b33
+ const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
+ const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // b00 b10 b20 b30 b01 b11 b21 b31
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // b02 b12 b22 b32 b03 b13 b23 b33
+ T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
+ T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
+ T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
+ T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+
+ // Horizontal pass and subsequent transpose.
+ {
+ // First pass, c and d calculations are longer because of the "trick"
+ // multiplications.
+ const __m128i four = _mm_set1_epi16(4);
+ const __m128i dc = _mm_add_epi16(T0, four);
+ const __m128i a = _mm_add_epi16(dc, T2);
+ const __m128i b = _mm_sub_epi16(dc, T2);
+ // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
+ const __m128i c1 = _mm_mulhi_epi16(T1, k2);
+ const __m128i c2 = _mm_mulhi_epi16(T3, k1);
+ const __m128i c3 = _mm_sub_epi16(T1, T3);
+ const __m128i c4 = _mm_sub_epi16(c1, c2);
+ const __m128i c = _mm_add_epi16(c3, c4);
+ // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
+ const __m128i d1 = _mm_mulhi_epi16(T1, k1);
+ const __m128i d2 = _mm_mulhi_epi16(T3, k2);
+ const __m128i d3 = _mm_add_epi16(T1, T3);
+ const __m128i d4 = _mm_add_epi16(d1, d2);
+ const __m128i d = _mm_add_epi16(d3, d4);
+
+ // Second pass.
+ const __m128i tmp0 = _mm_add_epi16(a, d);
+ const __m128i tmp1 = _mm_add_epi16(b, c);
+ const __m128i tmp2 = _mm_sub_epi16(b, c);
+ const __m128i tmp3 = _mm_sub_epi16(a, d);
+ const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
+ const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
+ const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
+ const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);
+
+ // Transpose the two 4x4.
+ // a00 a01 a02 a03 b00 b01 b02 b03
+ // a10 a11 a12 a13 b10 b11 b12 b13
+ // a20 a21 a22 a23 b20 b21 b22 b23
+ // a30 a31 a32 a33 b30 b31 b32 b33
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
+ const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
+ const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
+ // a00 a10 a01 a11 a02 a12 a03 a13
+ // a20 a30 a21 a31 a22 a32 a23 a33
+ // b00 b10 b01 b11 b02 b12 b03 b13
+ // b20 b30 b21 b31 b22 b32 b23 b33
+ const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
+ const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // b00 b10 b20 b30 b01 b11 b21 b31
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // b02 b12 b22 b32 b03 b13 b23 b33
+ T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
+ T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
+ T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
+ T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+
+ // Add inverse transform to 'dst' and store.
+ {
+ const __m128i zero = _mm_setzero_si128();
+ // Load the reference(s).
+ __m128i dst0, dst1, dst2, dst3;
+ if (do_two) {
+ // Load eight bytes/pixels per line.
+ dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
+ dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
+ dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
+ dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
+ } else {
+ // Load four bytes/pixels per line.
+ dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
+ dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
+ dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
+ dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
+ }
+ // Convert to 16b.
+ dst0 = _mm_unpacklo_epi8(dst0, zero);
+ dst1 = _mm_unpacklo_epi8(dst1, zero);
+ dst2 = _mm_unpacklo_epi8(dst2, zero);
+ dst3 = _mm_unpacklo_epi8(dst3, zero);
+ // Add the inverse transform(s).
+ dst0 = _mm_add_epi16(dst0, T0);
+ dst1 = _mm_add_epi16(dst1, T1);
+ dst2 = _mm_add_epi16(dst2, T2);
+ dst3 = _mm_add_epi16(dst3, T3);
+ // Unsigned saturate to 8b.
+ dst0 = _mm_packus_epi16(dst0, dst0);
+ dst1 = _mm_packus_epi16(dst1, dst1);
+ dst2 = _mm_packus_epi16(dst2, dst2);
+ dst3 = _mm_packus_epi16(dst3, dst3);
+ // Store the results.
+ if (do_two) {
+ // Store eight bytes/pixels per line.
+ _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
+ _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
+ _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
+ _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
+ } else {
+ // Store four bytes/pixels per line.
+ *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
+ *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
+ *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
+ *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
+ }
+ }
+}
+
+#if defined(USE_TRANSFORM_AC3)
+#define MUL(a, b) (((a) * (b)) >> 16)
+static void TransformAC3SSE2(const int16_t* in, uint8_t* dst) {
+ static const int kC1 = 20091 + (1 << 16);
+ static const int kC2 = 35468;
+ const __m128i A = _mm_set1_epi16(in[0] + 4);
+ const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
+ const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
+ const int c1 = MUL(in[1], kC2);
+ const int d1 = MUL(in[1], kC1);
+ const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
+ const __m128i B = _mm_adds_epi16(A, CD);
+ const __m128i m0 = _mm_adds_epi16(B, d4);
+ const __m128i m1 = _mm_adds_epi16(B, c4);
+ const __m128i m2 = _mm_subs_epi16(B, c4);
+ const __m128i m3 = _mm_subs_epi16(B, d4);
+ const __m128i zero = _mm_setzero_si128();
+ // Load the source pixels.
+ __m128i dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
+ __m128i dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
+ __m128i dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
+ __m128i dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
+ // Convert to 16b.
+ dst0 = _mm_unpacklo_epi8(dst0, zero);
+ dst1 = _mm_unpacklo_epi8(dst1, zero);
+ dst2 = _mm_unpacklo_epi8(dst2, zero);
+ dst3 = _mm_unpacklo_epi8(dst3, zero);
+ // Add the inverse transform.
+ dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
+ dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
+ dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
+ dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
+ // Unsigned saturate to 8b.
+ dst0 = _mm_packus_epi16(dst0, dst0);
+ dst1 = _mm_packus_epi16(dst1, dst1);
+ dst2 = _mm_packus_epi16(dst2, dst2);
+ dst3 = _mm_packus_epi16(dst3, dst3);
+ // Store the results.
+ *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
+ *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
+ *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
+ *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
+}
+#undef MUL
+#endif // USE_TRANSFORM_AC3
+
+//------------------------------------------------------------------------------
+// Loop Filter (Paragraph 15)
+
+// Compute abs(p - q) = subs(p - q) OR subs(q - p)
+#define MM_ABS(p, q) _mm_or_si128( \
+ _mm_subs_epu8((q), (p)), \
+ _mm_subs_epu8((p), (q)))
+
+// Shift each byte of "a" by N bits while preserving the sign bit.
+//
+// It first shifts the lower bytes of the words and then the upper bytes and
+// then merges the results together.
+#define SIGNED_SHIFT_N(a, N) { \
+ __m128i t = a; \
+ t = _mm_slli_epi16(t, 8); \
+ t = _mm_srai_epi16(t, N); \
+ t = _mm_srli_epi16(t, 8); \
+ \
+ a = _mm_srai_epi16(a, N + 8); \
+ a = _mm_slli_epi16(a, 8); \
+ \
+ a = _mm_or_si128(t, a); \
+}
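+
+// In effect, each signed byte v of "a" becomes (v >> N); SSE2 has no native
+// per-byte arithmetic shift (there is no _mm_srai_epi8), hence the two-step
+// emulation above.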
+
+#define FLIP_SIGN_BIT2(a, b) { \
+ a = _mm_xor_si128(a, sign_bit); \
+ b = _mm_xor_si128(b, sign_bit); \
+}
+
+#define FLIP_SIGN_BIT4(a, b, c, d) { \
+ FLIP_SIGN_BIT2(a, b); \
+ FLIP_SIGN_BIT2(c, d); \
+}
+
+#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) { \
+ const __m128i zero = _mm_setzero_si128(); \
+ const __m128i t_1 = MM_ABS(p1, p0); \
+ const __m128i t_2 = MM_ABS(q1, q0); \
+ \
+ const __m128i h = _mm_set1_epi8(hev_thresh); \
+ const __m128i t_3 = _mm_subs_epu8(t_1, h); /* abs(p1 - p0) - hev_thresh */ \
+ const __m128i t_4 = _mm_subs_epu8(t_2, h); /* abs(q1 - q0) - hev_thresh */ \
+ \
+ not_hev = _mm_or_si128(t_3, t_4); \
+ not_hev = _mm_cmpeq_epi8(not_hev, zero); /* abs(p1-p0) <= h && abs(q1-q0) <= h */\
+}
+
+#define GET_BASE_DELTA(p1, p0, q0, q1, o) { \
+ const __m128i qp0 = _mm_subs_epi8(q0, p0); /* q0 - p0 */ \
+ o = _mm_subs_epi8(p1, q1); /* p1 - q1 */ \
+ o = _mm_adds_epi8(o, qp0); /* p1 - q1 + 1 * (q0 - p0) */ \
+ o = _mm_adds_epi8(o, qp0); /* p1 - q1 + 2 * (q0 - p0) */ \
+ o = _mm_adds_epi8(o, qp0); /* p1 - q1 + 3 * (q0 - p0) */ \
+}
+
+#define DO_SIMPLE_FILTER(p0, q0, fl) { \
+ const __m128i three = _mm_set1_epi8(3); \
+ const __m128i four = _mm_set1_epi8(4); \
+ __m128i v3 = _mm_adds_epi8(fl, three); \
+ __m128i v4 = _mm_adds_epi8(fl, four); \
+ \
+ /* Do +4 side */ \
+ SIGNED_SHIFT_N(v4, 3); /* v4 >> 3 */ \
+ q0 = _mm_subs_epi8(q0, v4); /* q0 -= v4 */ \
+ \
+ /* Now do +3 side */ \
+ SIGNED_SHIFT_N(v3, 3); /* v3 >> 3 */ \
+ p0 = _mm_adds_epi8(p0, v3); /* p0 += v3 */ \
+}
+
+// Updates values of 2 pixels at MB edge during complex filtering.
+// Update operations:
+// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
+#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) { \
+ const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7); \
+ const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7); \
+ const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7); \
+ pi = _mm_adds_epi8(pi, delta); \
+ qi = _mm_subs_epi8(qi, delta); \
+}
+
+static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
+ const __m128i* q1, int thresh, __m128i *mask) {
+ __m128i t1 = MM_ABS(*p1, *q1); // abs(p1 - q1)
+ *mask = _mm_set1_epi8(0xFE);
+ t1 = _mm_and_si128(t1, *mask); // set lsb of each byte to zero
+ t1 = _mm_srli_epi16(t1, 1); // abs(p1 - q1) / 2
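+ // (clearing the lsb first keeps the 16-bit shift from dragging bits across
+ // byte boundaries, emulating a per-byte unsigned >> 1)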
+
+ *mask = MM_ABS(*p0, *q0); // abs(p0 - q0)
+ *mask = _mm_adds_epu8(*mask, *mask); // abs(p0 - q0) * 2
+ *mask = _mm_adds_epu8(*mask, t1); // abs(p0 - q0) * 2 + abs(p1 - q1) / 2
+
+ t1 = _mm_set1_epi8(thresh);
+ *mask = _mm_subs_epu8(*mask, t1); // mask <= thresh
+ *mask = _mm_cmpeq_epi8(*mask, _mm_setzero_si128());
+}
+
+//------------------------------------------------------------------------------
+// Edge filtering functions
+
+// Applies filter on 2 pixels (p0 and q0)
+static WEBP_INLINE void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0,
+ const __m128i* q1, int thresh) {
+ __m128i a, mask;
+ const __m128i sign_bit = _mm_set1_epi8(0x80);
+ const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
+ const __m128i q1s = _mm_xor_si128(*q1, sign_bit);
+
+ NeedsFilter(p1, p0, q0, q1, thresh, &mask);
+
+ // convert to signed values
+ FLIP_SIGN_BIT2(*p0, *q0);
+
+ GET_BASE_DELTA(p1s, *p0, *q0, q1s, a);
+ a = _mm_and_si128(a, mask); // mask filter values we don't care about
+ DO_SIMPLE_FILTER(*p0, *q0, a);
+
+ // unoffset
+ FLIP_SIGN_BIT2(*p0, *q0);
+}
+
+// Applies filter on 4 pixels (p1, p0, q0 and q1)
+static WEBP_INLINE void DoFilter4(__m128i* p1, __m128i *p0,
+ __m128i* q0, __m128i* q1,
+ const __m128i* mask, int hev_thresh) {
+ __m128i not_hev;
+ __m128i t1, t2, t3;
+ const __m128i sign_bit = _mm_set1_epi8(0x80);
+
+ // compute hev mask
+ GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);
+
+ // convert to signed values
+ FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
+
+ t1 = _mm_subs_epi8(*p1, *q1); // p1 - q1
+ t1 = _mm_andnot_si128(not_hev, t1); // hev(p1 - q1)
+ t2 = _mm_subs_epi8(*q0, *p0); // q0 - p0
+ t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 1 * (q0 - p0)
+ t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 2 * (q0 - p0)
+ t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 3 * (q0 - p0)
+ t1 = _mm_and_si128(t1, *mask); // mask filter values we don't care about
+
+ // Do +4 side
+ t2 = _mm_set1_epi8(4);
+ t2 = _mm_adds_epi8(t1, t2); // 3 * (q0 - p0) + (p1 - q1) + 4
+ SIGNED_SHIFT_N(t2, 3); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
+ t3 = t2; // save t2
+ *q0 = _mm_subs_epi8(*q0, t2); // q0 -= t2
+
+ // Now do +3 side
+ t2 = _mm_set1_epi8(3);
+ t2 = _mm_adds_epi8(t1, t2); // +3 instead of +4
+ SIGNED_SHIFT_N(t2, 3); // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
+ *p0 = _mm_adds_epi8(*p0, t2); // p0 += t2
+
+ t2 = _mm_set1_epi8(1);
+ t3 = _mm_adds_epi8(t3, t2);
+  SIGNED_SHIFT_N(t3, 1); // (((f + 4) >> 3) + 1) >> 1, f = 3*(q0 - p0) + hev(p1 - q1)
+
+ t3 = _mm_and_si128(not_hev, t3); // if !hev
+ *q1 = _mm_subs_epi8(*q1, t3); // q1 -= t3
+ *p1 = _mm_adds_epi8(*p1, t3); // p1 += t3
+
+ // unoffset
+ FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
+}
+
+// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
+static WEBP_INLINE void DoFilter6(__m128i *p2, __m128i* p1, __m128i *p0,
+ __m128i* q0, __m128i* q1, __m128i *q2,
+ const __m128i* mask, int hev_thresh) {
+ __m128i a, not_hev;
+ const __m128i sign_bit = _mm_set1_epi8(0x80);
+
+ // compute hev mask
+ GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev);
+
+ // convert to signed values
+ FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
+ FLIP_SIGN_BIT2(*p2, *q2);
+
+ GET_BASE_DELTA(*p1, *p0, *q0, *q1, a);
+
+ { // do simple filter on pixels with hev
+ const __m128i m = _mm_andnot_si128(not_hev, *mask);
+ const __m128i f = _mm_and_si128(a, m);
+ DO_SIMPLE_FILTER(*p0, *q0, f);
+ }
+ { // do strong filter on pixels with not hev
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i nine = _mm_set1_epi16(0x0900);
+ const __m128i sixty_three = _mm_set1_epi16(63);
+
+ const __m128i m = _mm_and_si128(not_hev, *mask);
+ const __m128i f = _mm_and_si128(a, m);
+ const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
+ const __m128i f_hi = _mm_unpackhi_epi8(zero, f);
+
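+    // f_lo/f_hi hold the filter value in the high byte of each 16-bit lane
+    // (i.e. f * 256), so _mm_mulhi_epi16 with nine == 0x0900 (9 * 256) gives
+    // exactly 9 * f: (f * 256) * (9 * 256) >> 16 == 9 * f, without overflow.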
+ const __m128i f9_lo = _mm_mulhi_epi16(f_lo, nine); // Filter (lo) * 9
+ const __m128i f9_hi = _mm_mulhi_epi16(f_hi, nine); // Filter (hi) * 9
+ const __m128i f18_lo = _mm_add_epi16(f9_lo, f9_lo); // Filter (lo) * 18
+ const __m128i f18_hi = _mm_add_epi16(f9_hi, f9_hi); // Filter (hi) * 18
+
+ const __m128i a2_lo = _mm_add_epi16(f9_lo, sixty_three); // Filter * 9 + 63
+ const __m128i a2_hi = _mm_add_epi16(f9_hi, sixty_three); // Filter * 9 + 63
+
+ const __m128i a1_lo = _mm_add_epi16(f18_lo, sixty_three); // F... * 18 + 63
+ const __m128i a1_hi = _mm_add_epi16(f18_hi, sixty_three); // F... * 18 + 63
+
+ const __m128i a0_lo = _mm_add_epi16(f18_lo, a2_lo); // Filter * 27 + 63
+ const __m128i a0_hi = _mm_add_epi16(f18_hi, a2_hi); // Filter * 27 + 63
+
+ UPDATE_2PIXELS(*p2, *q2, a2_lo, a2_hi);
+ UPDATE_2PIXELS(*p1, *q1, a1_lo, a1_hi);
+ UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi);
+ }
+
+ // unoffset
+ FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
+ FLIP_SIGN_BIT2(*p2, *q2);
+}
+
+// reads 8 rows across a vertical edge.
+//
+// TODO(somnath): Investigate _mm_shuffle*; also see whether this can be split
+// into two Load4x4() calls to avoid code duplication.
+static WEBP_INLINE void Load8x4(const uint8_t* b, int stride,
+ __m128i* p, __m128i* q) {
+ __m128i t1, t2;
+
+ // Load 0th, 1st, 4th and 5th rows
+ __m128i r0 = _mm_cvtsi32_si128(*((int*)&b[0 * stride])); // 03 02 01 00
+ __m128i r1 = _mm_cvtsi32_si128(*((int*)&b[1 * stride])); // 13 12 11 10
+ __m128i r4 = _mm_cvtsi32_si128(*((int*)&b[4 * stride])); // 43 42 41 40
+ __m128i r5 = _mm_cvtsi32_si128(*((int*)&b[5 * stride])); // 53 52 51 50
+
+ r0 = _mm_unpacklo_epi32(r0, r4); // 43 42 41 40 03 02 01 00
+ r1 = _mm_unpacklo_epi32(r1, r5); // 53 52 51 50 13 12 11 10
+
+ // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
+ t1 = _mm_unpacklo_epi8(r0, r1);
+
+ // Load 2nd, 3rd, 6th and 7th rows
+  r0 = _mm_cvtsi32_si128(*((int*)&b[2 * stride])); // 23 22 21 20
+ r1 = _mm_cvtsi32_si128(*((int*)&b[3 * stride])); // 33 32 31 30
+ r4 = _mm_cvtsi32_si128(*((int*)&b[6 * stride])); // 63 62 61 60
+ r5 = _mm_cvtsi32_si128(*((int*)&b[7 * stride])); // 73 72 71 70
+
+ r0 = _mm_unpacklo_epi32(r0, r4); // 63 62 61 60 23 22 21 20
+ r1 = _mm_unpacklo_epi32(r1, r5); // 73 72 71 70 33 32 31 30
+
+ // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
+ t2 = _mm_unpacklo_epi8(r0, r1);
+
+ // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
+ // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
+ r0 = t1;
+ t1 = _mm_unpacklo_epi16(t1, t2);
+ t2 = _mm_unpackhi_epi16(r0, t2);
+
+ // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
+ // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+ *p = _mm_unpacklo_epi32(t1, t2);
+ *q = _mm_unpackhi_epi32(t1, t2);
+}
+
+static WEBP_INLINE void Load16x4(const uint8_t* r0, const uint8_t* r8,
+ int stride,
+ __m128i* p1, __m128i* p0,
+ __m128i* q0, __m128i* q1) {
+ __m128i t1, t2;
+ // Assume the pixels around the edge (|) are numbered as follows
+ // 00 01 | 02 03
+ // 10 11 | 12 13
+ // ... | ...
+ // e0 e1 | e2 e3
+ // f0 f1 | f2 f3
+ //
+ // r0 is pointing to the 0th row (00)
+ // r8 is pointing to the 8th row (80)
+
+ // Load
+ // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
+ // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
+ // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
+ // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
+ Load8x4(r0, stride, p1, q0);
+ Load8x4(r8, stride, p0, q1);
+
+ t1 = *p1;
+ t2 = *q0;
+ // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
+ // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
+ // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
+ // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
+ *p1 = _mm_unpacklo_epi64(t1, *p0);
+ *p0 = _mm_unpackhi_epi64(t1, *p0);
+ *q0 = _mm_unpacklo_epi64(t2, *q1);
+ *q1 = _mm_unpackhi_epi64(t2, *q1);
+}
+
+static WEBP_INLINE void Store4x4(__m128i* x, uint8_t* dst, int stride) {
+ int i;
+ for (i = 0; i < 4; ++i, dst += stride) {
+ *((int32_t*)dst) = _mm_cvtsi128_si32(*x);
+ *x = _mm_srli_si128(*x, 4);
+ }
+}
+
+// Transpose back and store
+static WEBP_INLINE void Store16x4(uint8_t* r0, uint8_t* r8, int stride,
+ __m128i* p1, __m128i* p0,
+ __m128i* q0, __m128i* q1) {
+ __m128i t1;
+
+ // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
+ // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
+ t1 = *p0;
+ *p0 = _mm_unpacklo_epi8(*p1, t1);
+ *p1 = _mm_unpackhi_epi8(*p1, t1);
+
+ // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
+ // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
+ t1 = *q0;
+ *q0 = _mm_unpacklo_epi8(t1, *q1);
+ *q1 = _mm_unpackhi_epi8(t1, *q1);
+
+ // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
+ // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
+ t1 = *p0;
+ *p0 = _mm_unpacklo_epi16(t1, *q0);
+ *q0 = _mm_unpackhi_epi16(t1, *q0);
+
+ // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
+ // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
+ t1 = *p1;
+ *p1 = _mm_unpacklo_epi16(t1, *q1);
+ *q1 = _mm_unpackhi_epi16(t1, *q1);
+
+ Store4x4(p0, r0, stride);
+ r0 += 4 * stride;
+ Store4x4(q0, r0, stride);
+
+ Store4x4(p1, r8, stride);
+ r8 += 4 * stride;
+ Store4x4(q1, r8, stride);
+}
+
+//------------------------------------------------------------------------------
+// Simple In-loop filtering (Paragraph 15.2)
+
+static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) {
+ // Load
+ __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
+ __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
+ __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
+ __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);
+
+ DoFilter2(&p1, &p0, &q0, &q1, thresh);
+
+ // Store
+ _mm_storeu_si128((__m128i*)&p[-stride], p0);
+ _mm_storeu_si128((__m128i*)p, q0);
+}
+
+static void SimpleHFilter16SSE2(uint8_t* p, int stride, int thresh) {
+ __m128i p1, p0, q0, q1;
+
+ p -= 2; // beginning of p1
+
+ Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
+ DoFilter2(&p1, &p0, &q0, &q1, thresh);
+ Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
+}
+
+static void SimpleVFilter16iSSE2(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4 * stride;
+ SimpleVFilter16SSE2(p, stride, thresh);
+ }
+}
+
+static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4;
+ SimpleHFilter16SSE2(p, stride, thresh);
+ }
+}
+
+//------------------------------------------------------------------------------
+// Complex In-loop filtering (Paragraph 15.3)
+
+#define MAX_DIFF1(p3, p2, p1, p0, m) { \
+ m = MM_ABS(p3, p2); \
+ m = _mm_max_epu8(m, MM_ABS(p2, p1)); \
+ m = _mm_max_epu8(m, MM_ABS(p1, p0)); \
+}
+
+#define MAX_DIFF2(p3, p2, p1, p0, m) { \
+ m = _mm_max_epu8(m, MM_ABS(p3, p2)); \
+ m = _mm_max_epu8(m, MM_ABS(p2, p1)); \
+ m = _mm_max_epu8(m, MM_ABS(p1, p0)); \
+}
+
+#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) { \
+ e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]); \
+ e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]); \
+ e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]); \
+ e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]); \
+}
+
+#define LOADUV_H_EDGE(p, u, v, stride) { \
+ p = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \
+ p = _mm_unpacklo_epi64(p, _mm_loadl_epi64((__m128i*)&(v)[(stride)])); \
+}
+
+#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) { \
+ LOADUV_H_EDGE(e1, u, v, 0 * stride); \
+ LOADUV_H_EDGE(e2, u, v, 1 * stride); \
+ LOADUV_H_EDGE(e3, u, v, 2 * stride); \
+ LOADUV_H_EDGE(e4, u, v, 3 * stride); \
+}
+
+#define STOREUV(p, u, v, stride) { \
+ _mm_storel_epi64((__m128i*)&u[(stride)], p); \
+ p = _mm_srli_si128(p, 8); \
+ _mm_storel_epi64((__m128i*)&v[(stride)], p); \
+}
+
+#define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) { \
+ __m128i fl_yes; \
+ const __m128i it = _mm_set1_epi8(ithresh); \
+ mask = _mm_subs_epu8(mask, it); \
+ mask = _mm_cmpeq_epi8(mask, _mm_setzero_si128()); \
+ NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes); \
+ mask = _mm_and_si128(mask, fl_yes); \
+}
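+// (mask ends up 0xFF only where every inner-edge difference is <= ithresh
+//  and the NeedsFilter() threshold test also passes)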
+
+// on macroblock edges
+static void VFilter16SSE2(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ __m128i t1;
+ __m128i mask;
+ __m128i p2, p1, p0, q0, q1, q2;
+
+ // Load p3, p2, p1, p0
+ LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
+ MAX_DIFF1(t1, p2, p1, p0, mask);
+
+ // Load q0, q1, q2, q3
+ LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
+ MAX_DIFF2(t1, q2, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
+
+ // Store
+ _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
+ _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
+ _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
+ _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
+ _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
+ _mm_storeu_si128((__m128i*)&p[2 * stride], q2);
+}
+
+static void HFilter16SSE2(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ __m128i mask;
+ __m128i p3, p2, p1, p0, q0, q1, q2, q3;
+
+ uint8_t* const b = p - 4;
+ Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0); // p3, p2, p1, p0
+ MAX_DIFF1(p3, p2, p1, p0, mask);
+
+ Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3); // q0, q1, q2, q3
+ MAX_DIFF2(q3, q2, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
+
+ Store16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);
+ Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
+}
+
+// on three inner edges
+static void VFilter16iSSE2(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ int k;
+ __m128i mask;
+ __m128i t1, t2, p1, p0, q0, q1;
+
+ for (k = 3; k > 0; --k) {
+ // Load p3, p2, p1, p0
+ LOAD_H_EDGES4(p, stride, t2, t1, p1, p0);
+ MAX_DIFF1(t2, t1, p1, p0, mask);
+
+ p += 4 * stride;
+
+ // Load q0, q1, q2, q3
+ LOAD_H_EDGES4(p, stride, q0, q1, t1, t2);
+ MAX_DIFF2(t2, t1, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
+
+ // Store
+ _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
+ _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
+ _mm_storeu_si128((__m128i*)&p[0 * stride], q0);
+ _mm_storeu_si128((__m128i*)&p[1 * stride], q1);
+ }
+}
+
+static void HFilter16iSSE2(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ int k;
+ uint8_t* b;
+ __m128i mask;
+ __m128i t1, t2, p1, p0, q0, q1;
+
+ for (k = 3; k > 0; --k) {
+ b = p;
+ Load16x4(b, b + 8 * stride, stride, &t2, &t1, &p1, &p0); // p3, p2, p1, p0
+ MAX_DIFF1(t2, t1, p1, p0, mask);
+
+ b += 4; // beginning of q0
+ Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3
+ MAX_DIFF2(t2, t1, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
+
+ b -= 2; // beginning of p1
+ Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1);
+
+ p += 4;
+ }
+}
+
+// 8-pixel-wide variant, for chroma filtering
+static void VFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ __m128i mask;
+ __m128i t1, p2, p1, p0, q0, q1, q2;
+
+ // Load p3, p2, p1, p0
+ LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
+ MAX_DIFF1(t1, p2, p1, p0, mask);
+
+ // Load q0, q1, q2, q3
+ LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
+ MAX_DIFF2(t1, q2, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
+
+ // Store
+ STOREUV(p2, u, v, -3 * stride);
+ STOREUV(p1, u, v, -2 * stride);
+ STOREUV(p0, u, v, -1 * stride);
+ STOREUV(q0, u, v, 0 * stride);
+ STOREUV(q1, u, v, 1 * stride);
+ STOREUV(q2, u, v, 2 * stride);
+}
+
+static void HFilter8SSE2(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ __m128i mask;
+ __m128i p3, p2, p1, p0, q0, q1, q2, q3;
+
+ uint8_t* const tu = u - 4;
+ uint8_t* const tv = v - 4;
+ Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0); // p3, p2, p1, p0
+ MAX_DIFF1(p3, p2, p1, p0, mask);
+
+ Load16x4(u, v, stride, &q0, &q1, &q2, &q3); // q0, q1, q2, q3
+ MAX_DIFF2(q3, q2, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);
+
+ Store16x4(tu, tv, stride, &p3, &p2, &p1, &p0);
+ Store16x4(u, v, stride, &q0, &q1, &q2, &q3);
+}
+
+static void VFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ __m128i mask;
+ __m128i t1, t2, p1, p0, q0, q1;
+
+ // Load p3, p2, p1, p0
+ LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
+ MAX_DIFF1(t2, t1, p1, p0, mask);
+
+ u += 4 * stride;
+ v += 4 * stride;
+
+ // Load q0, q1, q2, q3
+ LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
+ MAX_DIFF2(t2, t1, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
+
+ // Store
+ STOREUV(p1, u, v, -2 * stride);
+ STOREUV(p0, u, v, -1 * stride);
+ STOREUV(q0, u, v, 0 * stride);
+ STOREUV(q1, u, v, 1 * stride);
+}
+
+static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ __m128i mask;
+ __m128i t1, t2, p1, p0, q0, q1;
+ Load16x4(u, v, stride, &t2, &t1, &p1, &p0); // p3, p2, p1, p0
+ MAX_DIFF1(t2, t1, p1, p0, mask);
+
+ u += 4; // beginning of q0
+ v += 4;
+ Load16x4(u, v, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3
+ MAX_DIFF2(t2, t1, q1, q0, mask);
+
+ COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask);
+ DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh);
+
+ u -= 2; // beginning of p1
+ v -= 2;
+ Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
+}
+
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8DspInitSSE2(void);
+
+void VP8DspInitSSE2(void) {
+#if defined(WEBP_USE_SSE2)
+ VP8Transform = TransformSSE2;
+#if defined(USE_TRANSFORM_AC3)
+ VP8TransformAC3 = TransformAC3SSE2;
+#endif
+
+ VP8VFilter16 = VFilter16SSE2;
+ VP8HFilter16 = HFilter16SSE2;
+ VP8VFilter8 = VFilter8SSE2;
+ VP8HFilter8 = HFilter8SSE2;
+ VP8VFilter16i = VFilter16iSSE2;
+ VP8HFilter16i = HFilter16iSSE2;
+ VP8VFilter8i = VFilter8iSSE2;
+ VP8HFilter8i = HFilter8iSSE2;
+
+ VP8SimpleVFilter16 = SimpleVFilter16SSE2;
+ VP8SimpleHFilter16 = SimpleHFilter16SSE2;
+ VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
+ VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
+#endif // WEBP_USE_SSE2
+}
+
diff --git a/drivers/webp/dsp/dsp.h b/drivers/webp/dsp/dsp.h
new file mode 100644
index 000000000..3be783afe
--- /dev/null
+++ b/drivers/webp/dsp/dsp.h
@@ -0,0 +1,224 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Speed-critical functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_DSP_DSP_H_
+#define WEBP_DSP_DSP_H_
+
+#include "../webp/types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//------------------------------------------------------------------------------
+// CPU detection
+
+#if defined(_MSC_VER) && _MSC_VER > 1310 && \
+ (defined(_M_X64) || defined(_M_IX86))
+#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets
+#endif
+
+#if defined(__SSE2__) || defined(WEBP_MSC_SSE2)
+#define WEBP_USE_SSE2
+#endif
+
+#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
+#define WEBP_ANDROID_NEON // Android targets that might support NEON
+#endif
+
+#if defined(__ARM_NEON__) || defined(WEBP_ANDROID_NEON)
+#define WEBP_USE_NEON
+#endif
+
+typedef enum {
+ kSSE2,
+ kSSE3,
+ kNEON
+} CPUFeature;
+// returns true if the CPU supports the feature.
+typedef int (*VP8CPUInfo)(CPUFeature feature);
+extern VP8CPUInfo VP8GetCPUInfo;
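+// An illustrative dispatch sketch (this mirrors the *DspInit() entry points
+// in this patch; VP8GetCPUInfo may be NULL, in which case runtime detection
+// is skipped):
+//   if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSSE2)) {
+//     /* install the SSE2 variants of the function pointers */
+//   }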
+
+//------------------------------------------------------------------------------
+// Encoding
+
+// Transforms
+// VP8Idct: Does one or two inverse transforms. If do_two is set, the transforms
+// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4).
+typedef void (*VP8Idct)(const uint8_t* ref, const int16_t* in, uint8_t* dst,
+ int do_two);
+typedef void (*VP8Fdct)(const uint8_t* src, const uint8_t* ref, int16_t* out);
+typedef void (*VP8WHT)(const int16_t* in, int16_t* out);
+extern VP8Idct VP8ITransform;
+extern VP8Fdct VP8FTransform;
+extern VP8WHT VP8ITransformWHT;
+extern VP8WHT VP8FTransformWHT;
+// Predictions
+// *dst is the destination block. *top and *left can be NULL.
+typedef void (*VP8IntraPreds)(uint8_t *dst, const uint8_t* left,
+ const uint8_t* top);
+typedef void (*VP8Intra4Preds)(uint8_t *dst, const uint8_t* top);
+extern VP8Intra4Preds VP8EncPredLuma4;
+extern VP8IntraPreds VP8EncPredLuma16;
+extern VP8IntraPreds VP8EncPredChroma8;
+
+typedef int (*VP8Metric)(const uint8_t* pix, const uint8_t* ref);
+extern VP8Metric VP8SSE16x16, VP8SSE16x8, VP8SSE8x8, VP8SSE4x4;
+typedef int (*VP8WMetric)(const uint8_t* pix, const uint8_t* ref,
+ const uint16_t* const weights);
+extern VP8WMetric VP8TDisto4x4, VP8TDisto16x16;
+
+typedef void (*VP8BlockCopy)(const uint8_t* src, uint8_t* dst);
+extern VP8BlockCopy VP8Copy4x4;
+// Quantization
+struct VP8Matrix; // forward declaration
+typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
+ int n, const struct VP8Matrix* const mtx);
+extern VP8QuantizeBlock VP8EncQuantizeBlock;
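+// (the return value is non-zero iff some quantized coefficient survives;
+//  see QuantizeBlock() in enc.c below)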
+
+// specific to 2nd transform:
+typedef int (*VP8QuantizeBlockWHT)(int16_t in[16], int16_t out[16],
+ const struct VP8Matrix* const mtx);
+extern VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT;
+
+// Collect histogram for susceptibility calculation and accumulate in histo[].
+struct VP8Histogram;
+typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ struct VP8Histogram* const histo);
+extern const int VP8DspScan[16 + 4 + 4];
+extern VP8CHisto VP8CollectHistogram;
+
+void VP8EncDspInit(void); // must be called before using any of the above
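+// A minimal setup sketch (illustrative only; 'src', 'ref' and 'coeffs' are
+// hypothetical caller-side buffers laid out with stride BPS):
+//   VP8EncDspInit();                  // installs C or SIMD implementations
+//   VP8FTransform(src, ref, coeffs);  // the pointers are then callable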
+
+//------------------------------------------------------------------------------
+// Decoding
+
+typedef void (*VP8DecIdct)(const int16_t* coeffs, uint8_t* dst);
+// when doing two transforms, coeffs is actually int16_t[2][16].
+typedef void (*VP8DecIdct2)(const int16_t* coeffs, uint8_t* dst, int do_two);
+extern VP8DecIdct2 VP8Transform;
+extern VP8DecIdct VP8TransformAC3;
+extern VP8DecIdct VP8TransformUV;
+extern VP8DecIdct VP8TransformDC;
+extern VP8DecIdct VP8TransformDCUV;
+extern VP8WHT VP8TransformWHT;
+
+// *dst is the destination block, with stride BPS. Boundary samples are
+// assumed accessible when needed.
+typedef void (*VP8PredFunc)(uint8_t* dst);
+extern const VP8PredFunc VP8PredLuma16[/* NUM_B_DC_MODES */];
+extern const VP8PredFunc VP8PredChroma8[/* NUM_B_DC_MODES */];
+extern const VP8PredFunc VP8PredLuma4[/* NUM_BMODES */];
+
+// simple filter (only for luma)
+typedef void (*VP8SimpleFilterFunc)(uint8_t* p, int stride, int thresh);
+extern VP8SimpleFilterFunc VP8SimpleVFilter16;
+extern VP8SimpleFilterFunc VP8SimpleHFilter16;
+extern VP8SimpleFilterFunc VP8SimpleVFilter16i; // filter 3 inner edges
+extern VP8SimpleFilterFunc VP8SimpleHFilter16i;
+
+// regular filter (on both macroblock edges and inner edges)
+typedef void (*VP8LumaFilterFunc)(uint8_t* luma, int stride,
+ int thresh, int ithresh, int hev_t);
+typedef void (*VP8ChromaFilterFunc)(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_t);
+// on outer edge
+extern VP8LumaFilterFunc VP8VFilter16;
+extern VP8LumaFilterFunc VP8HFilter16;
+extern VP8ChromaFilterFunc VP8VFilter8;
+extern VP8ChromaFilterFunc VP8HFilter8;
+
+// on inner edge
+extern VP8LumaFilterFunc VP8VFilter16i; // filtering 3 inner edges altogether
+extern VP8LumaFilterFunc VP8HFilter16i;
+extern VP8ChromaFilterFunc VP8VFilter8i; // filtering u and v altogether
+extern VP8ChromaFilterFunc VP8HFilter8i;
+
+// must be called before anything using the above
+void VP8DspInit(void);
+
+//------------------------------------------------------------------------------
+// WebP I/O
+
+#define FANCY_UPSAMPLING   // undefine this to remove fancy upsampling support
+
+// Convert a pair of y/u/v lines together to the output rgb/a colorspace.
+// bottom_y can be NULL if only one line of output is needed (at top/bottom).
+typedef void (*WebPUpsampleLinePairFunc)(
+ const uint8_t* top_y, const uint8_t* bottom_y,
+ const uint8_t* top_u, const uint8_t* top_v,
+ const uint8_t* cur_u, const uint8_t* cur_v,
+ uint8_t* top_dst, uint8_t* bottom_dst, int len);
+
+#ifdef FANCY_UPSAMPLING
+
+// Fancy upsampling functions to convert YUV to RGB(A) modes
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+// Initializes SSE2 version of the fancy upsamplers.
+void WebPInitUpsamplersSSE2(void);
+
+// NEON version
+void WebPInitUpsamplersNEON(void);
+
+#endif // FANCY_UPSAMPLING
+
+// Point-sampling methods.
+typedef void (*WebPSampleLinePairFunc)(
+ const uint8_t* top_y, const uint8_t* bottom_y,
+ const uint8_t* u, const uint8_t* v,
+ uint8_t* top_dst, uint8_t* bottom_dst, int len);
+
+extern const WebPSampleLinePairFunc WebPSamplers[/* MODE_LAST */];
+
+// General function for converting two lines of ARGB or RGBA.
+// 'alpha_is_last' should be true if 0xff000000 is stored in memory as
+// 0x00, 0x00, 0x00, 0xff (little endian).
+WebPUpsampleLinePairFunc WebPGetLinePairConverter(int alpha_is_last);
+
+// YUV444->RGB converters
+typedef void (*WebPYUV444Converter)(const uint8_t* y,
+ const uint8_t* u, const uint8_t* v,
+ uint8_t* dst, int len);
+
+extern const WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */];
+
+// Main function to be called
+void WebPInitUpsamplers(void);
+
+//------------------------------------------------------------------------------
+// Pre-multiply planes with alpha values
+
+// Apply alpha pre-multiply on an rgba, bgra or argb plane of size w * h.
+// 'alpha_first' should be 1 for argb (alpha stored first), 0 for rgba or
+// bgra (where alpha is stored last).
+extern void (*WebPApplyAlphaMultiply)(
+ uint8_t* rgba, int alpha_first, int w, int h, int stride);
+
+// Same, but specifically for the RGBA4444 format
+extern void (*WebPApplyAlphaMultiply4444)(
+ uint8_t* rgba4444, int w, int h, int stride);
+
+// To be called first before using the above.
+void WebPInitPremultiply(void);
+
+void WebPInitPremultiplySSE2(void); // should not be called directly.
+void WebPInitPremultiplyNEON(void);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* WEBP_DSP_DSP_H_ */
diff --git a/drivers/webp/dsp/enc.c b/drivers/webp/dsp/enc.c
new file mode 100644
index 000000000..fcc6ec8ea
--- /dev/null
+++ b/drivers/webp/dsp/enc.c
@@ -0,0 +1,753 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Speed-critical encoding functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h> // for abs()
+
+#include "./dsp.h"
+#include "../enc/vp8enci.h"
+
+static WEBP_INLINE uint8_t clip_8b(int v) {
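+  // (v & ~0xff) == 0 exactly when v already lies in [0, 255].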
+ return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
+}
+
+static WEBP_INLINE int clip_max(int v, int max) {
+ return (v > max) ? max : v;
+}
+
+//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
+const int VP8DspScan[16 + 4 + 4] = {
+ // Luma
+ 0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
+ 0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS,
+ 0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS,
+ 0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS,
+
+ 0 + 0 * BPS, 4 + 0 * BPS, 0 + 4 * BPS, 4 + 4 * BPS, // U
+ 8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V
+};
+
+static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ VP8Histogram* const histo) {
+ int j;
+ for (j = start_block; j < end_block; ++j) {
+ int k;
+ int16_t out[16];
+
+ VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
+
+ // Convert coefficients to bin.
+ for (k = 0; k < 16; ++k) {
+ const int v = abs(out[k]) >> 3; // TODO(skal): add rounding?
+ const int clipped_value = clip_max(v, MAX_COEFF_THRESH);
+ histo->distribution[clipped_value]++;
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+// run-time tables (~4k)
+
+static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255]
+
+// We declare this variable 'volatile' to prevent instruction reordering
+// and make sure it's set to true _last_ (so as to be thread-safe)
+static volatile int tables_ok = 0;
+
+static void InitTables(void) {
+ if (!tables_ok) {
+ int i;
+ for (i = -255; i <= 255 + 255; ++i) {
+ clip1[255 + i] = clip_8b(i);
+ }
+ tables_ok = 1;
+ }
+}
+
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+#define STORE(x, y, v) \
+ dst[(x) + (y) * BPS] = clip_8b(ref[(x) + (y) * BPS] + ((v) >> 3))
+
+static const int kC1 = 20091 + (1 << 16);
+static const int kC2 = 35468;
+#define MUL(a, b) (((a) * (b)) >> 16)
+
+static WEBP_INLINE void ITransformOne(const uint8_t* ref, const int16_t* in,
+ uint8_t* dst) {
+ int C[4 * 4], *tmp;
+ int i;
+ tmp = C;
+ for (i = 0; i < 4; ++i) { // vertical pass
+ const int a = in[0] + in[8];
+ const int b = in[0] - in[8];
+ const int c = MUL(in[4], kC2) - MUL(in[12], kC1);
+ const int d = MUL(in[4], kC1) + MUL(in[12], kC2);
+ tmp[0] = a + d;
+ tmp[1] = b + c;
+ tmp[2] = b - c;
+ tmp[3] = a - d;
+ tmp += 4;
+ in++;
+ }
+
+ tmp = C;
+ for (i = 0; i < 4; ++i) { // horizontal pass
+ const int dc = tmp[0] + 4;
+ const int a = dc + tmp[8];
+ const int b = dc - tmp[8];
+ const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1);
+ const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2);
+ STORE(0, i, a + d);
+ STORE(1, i, b + c);
+ STORE(2, i, b - c);
+ STORE(3, i, a - d);
+ tmp++;
+ }
+}
+
+static void ITransform(const uint8_t* ref, const int16_t* in, uint8_t* dst,
+ int do_two) {
+ ITransformOne(ref, in, dst);
+ if (do_two) {
+ ITransformOne(ref + 4, in + 16, dst + 4);
+ }
+}
+
+static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
+ int i;
+ int tmp[16];
+ for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
+ const int d0 = src[0] - ref[0]; // 9bit dynamic range ([-255,255])
+ const int d1 = src[1] - ref[1];
+ const int d2 = src[2] - ref[2];
+ const int d3 = src[3] - ref[3];
+ const int a0 = (d0 + d3); // 10b [-510,510]
+ const int a1 = (d1 + d2);
+ const int a2 = (d1 - d2);
+ const int a3 = (d0 - d3);
+ tmp[0 + i * 4] = (a0 + a1) * 8; // 14b [-8160,8160]
+ tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542]
+ tmp[2 + i * 4] = (a0 - a1) * 8;
+ tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int a0 = (tmp[0 + i] + tmp[12 + i]); // 15b
+ const int a1 = (tmp[4 + i] + tmp[ 8 + i]);
+ const int a2 = (tmp[4 + i] - tmp[ 8 + i]);
+ const int a3 = (tmp[0 + i] - tmp[12 + i]);
+ out[0 + i] = (a0 + a1 + 7) >> 4; // 12b
+ out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
+ out[8 + i] = (a0 - a1 + 7) >> 4;
+ out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
+ }
+}
+
+static void ITransformWHT(const int16_t* in, int16_t* out) {
+ int tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i) {
+ const int a0 = in[0 + i] + in[12 + i];
+ const int a1 = in[4 + i] + in[ 8 + i];
+ const int a2 = in[4 + i] - in[ 8 + i];
+ const int a3 = in[0 + i] - in[12 + i];
+ tmp[0 + i] = a0 + a1;
+ tmp[8 + i] = a0 - a1;
+ tmp[4 + i] = a3 + a2;
+ tmp[12 + i] = a3 - a2;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int dc = tmp[0 + i * 4] + 3; // w/ rounder
+ const int a0 = dc + tmp[3 + i * 4];
+ const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
+ const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
+ const int a3 = dc - tmp[3 + i * 4];
+ out[ 0] = (a0 + a1) >> 3;
+ out[16] = (a3 + a2) >> 3;
+ out[32] = (a0 - a1) >> 3;
+ out[48] = (a3 - a2) >> 3;
+ out += 64;
+ }
+}
+
+static void FTransformWHT(const int16_t* in, int16_t* out) {
+ // input is 12b signed
+ int32_t tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i, in += 64) {
+ const int a0 = (in[0 * 16] + in[2 * 16]); // 13b
+ const int a1 = (in[1 * 16] + in[3 * 16]);
+ const int a2 = (in[1 * 16] - in[3 * 16]);
+ const int a3 = (in[0 * 16] - in[2 * 16]);
+ tmp[0 + i * 4] = a0 + a1; // 14b
+ tmp[1 + i * 4] = a3 + a2;
+ tmp[2 + i * 4] = a3 - a2;
+ tmp[3 + i * 4] = a0 - a1;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int a0 = (tmp[0 + i] + tmp[8 + i]); // 15b
+ const int a1 = (tmp[4 + i] + tmp[12+ i]);
+ const int a2 = (tmp[4 + i] - tmp[12+ i]);
+ const int a3 = (tmp[0 + i] - tmp[8 + i]);
+ const int b0 = a0 + a1; // 16b
+ const int b1 = a3 + a2;
+ const int b2 = a3 - a2;
+ const int b3 = a0 - a1;
+ out[ 0 + i] = b0 >> 1; // 15b
+ out[ 4 + i] = b1 >> 1;
+ out[ 8 + i] = b2 >> 1;
+ out[12 + i] = b3 >> 1;
+ }
+}
+
+#undef MUL
+#undef STORE
+
+//------------------------------------------------------------------------------
+// Intra predictions
+
+#define DST(x, y) dst[(x) + (y) * BPS]
+
+static WEBP_INLINE void Fill(uint8_t* dst, int value, int size) {
+ int j;
+ for (j = 0; j < size; ++j) {
+ memset(dst + j * BPS, value, size);
+ }
+}
+
+static WEBP_INLINE void VerticalPred(uint8_t* dst,
+ const uint8_t* top, int size) {
+ int j;
+ if (top) {
+ for (j = 0; j < size; ++j) memcpy(dst + j * BPS, top, size);
+ } else {
+ Fill(dst, 127, size);
+ }
+}
+
+static WEBP_INLINE void HorizontalPred(uint8_t* dst,
+ const uint8_t* left, int size) {
+ if (left) {
+ int j;
+ for (j = 0; j < size; ++j) {
+ memset(dst + j * BPS, left[j], size);
+ }
+ } else {
+ Fill(dst, 129, size);
+ }
+}
+
+static WEBP_INLINE void TrueMotion(uint8_t* dst, const uint8_t* left,
+ const uint8_t* top, int size) {
+ int y;
+ if (left) {
+ if (top) {
+ const uint8_t* const clip = clip1 + 255 - left[-1];
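+      // Since clip1[255 + v] == clip_8b(v), the lookup below computes
+      // clip_8b(top[x] + left[y] - left[-1]) using only pointer offsets.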
+ for (y = 0; y < size; ++y) {
+ const uint8_t* const clip_table = clip + left[y];
+ int x;
+ for (x = 0; x < size; ++x) {
+ dst[x] = clip_table[top[x]];
+ }
+ dst += BPS;
+ }
+ } else {
+ HorizontalPred(dst, left, size);
+ }
+ } else {
+ // true motion without left samples (hence: with default 129 value)
+ // is equivalent to VE prediction where you just copy the top samples.
+ // Note that if top samples are not available, the default value is
+ // then 129, and not 127 as in the VerticalPred case.
+ if (top) {
+ VerticalPred(dst, top, size);
+ } else {
+ Fill(dst, 129, size);
+ }
+ }
+}
+
+static WEBP_INLINE void DCMode(uint8_t* dst, const uint8_t* left,
+ const uint8_t* top,
+ int size, int round, int shift) {
+ int DC = 0;
+ int j;
+ if (top) {
+ for (j = 0; j < size; ++j) DC += top[j];
+ if (left) { // top and left present
+ for (j = 0; j < size; ++j) DC += left[j];
+ } else { // top, but no left
+ DC += DC;
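+      // (doubling counts each available sample twice, so the rounding shift
+      //  below still divides by the full 2 * size count)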
+ }
+ DC = (DC + round) >> shift;
+ } else if (left) { // left but no top
+ for (j = 0; j < size; ++j) DC += left[j];
+ DC += DC;
+ DC = (DC + round) >> shift;
+ } else { // no top, no left, nothing.
+ DC = 0x80;
+ }
+ Fill(dst, DC, size);
+}
+
+//------------------------------------------------------------------------------
+// Chroma 8x8 prediction (paragraph 12.2)
+
+static void IntraChromaPreds(uint8_t* dst, const uint8_t* left,
+ const uint8_t* top) {
+ // U block
+ DCMode(C8DC8 + dst, left, top, 8, 8, 4);
+ VerticalPred(C8VE8 + dst, top, 8);
+ HorizontalPred(C8HE8 + dst, left, 8);
+ TrueMotion(C8TM8 + dst, left, top, 8);
+ // V block
+ dst += 8;
+ if (top) top += 8;
+ if (left) left += 16;
+ DCMode(C8DC8 + dst, left, top, 8, 8, 4);
+ VerticalPred(C8VE8 + dst, top, 8);
+ HorizontalPred(C8HE8 + dst, left, 8);
+ TrueMotion(C8TM8 + dst, left, top, 8);
+}
+
+//------------------------------------------------------------------------------
+// luma 16x16 prediction (paragraph 12.3)
+
+static void Intra16Preds(uint8_t* dst,
+ const uint8_t* left, const uint8_t* top) {
+ DCMode(I16DC16 + dst, left, top, 16, 16, 5);
+ VerticalPred(I16VE16 + dst, top, 16);
+ HorizontalPred(I16HE16 + dst, left, 16);
+ TrueMotion(I16TM16 + dst, left, top, 16);
+}
+
+//------------------------------------------------------------------------------
+// luma 4x4 prediction
+
+#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+#define AVG2(a, b) (((a) + (b) + 1) >> 1)
+
+static void VE4(uint8_t* dst, const uint8_t* top) { // vertical
+ const uint8_t vals[4] = {
+ AVG3(top[-1], top[0], top[1]),
+ AVG3(top[ 0], top[1], top[2]),
+ AVG3(top[ 1], top[2], top[3]),
+ AVG3(top[ 2], top[3], top[4])
+ };
+ int i;
+ for (i = 0; i < 4; ++i) {
+ memcpy(dst + i * BPS, vals, 4);
+ }
+}
+
+static void HE4(uint8_t* dst, const uint8_t* top) { // horizontal
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ *(uint32_t*)(dst + 0 * BPS) = 0x01010101U * AVG3(X, I, J);
+ *(uint32_t*)(dst + 1 * BPS) = 0x01010101U * AVG3(I, J, K);
+ *(uint32_t*)(dst + 2 * BPS) = 0x01010101U * AVG3(J, K, L);
+ *(uint32_t*)(dst + 3 * BPS) = 0x01010101U * AVG3(K, L, L);
+}
+
+static void DC4(uint8_t* dst, const uint8_t* top) {
+ uint32_t dc = 4;
+ int i;
+ for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i];
+ Fill(dst, dc >> 3, 4);
+}
+
+static void RD4(uint8_t* dst, const uint8_t* top) {
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ DST(0, 3) = AVG3(J, K, L);
+ DST(0, 2) = DST(1, 3) = AVG3(I, J, K);
+ DST(0, 1) = DST(1, 2) = DST(2, 3) = AVG3(X, I, J);
+ DST(0, 0) = DST(1, 1) = DST(2, 2) = DST(3, 3) = AVG3(A, X, I);
+ DST(1, 0) = DST(2, 1) = DST(3, 2) = AVG3(B, A, X);
+ DST(2, 0) = DST(3, 1) = AVG3(C, B, A);
+ DST(3, 0) = AVG3(D, C, B);
+}
+
+static void LD4(uint8_t* dst, const uint8_t* top) {
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ const int E = top[4];
+ const int F = top[5];
+ const int G = top[6];
+ const int H = top[7];
+ DST(0, 0) = AVG3(A, B, C);
+ DST(1, 0) = DST(0, 1) = AVG3(B, C, D);
+ DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E);
+ DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F);
+ DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G);
+ DST(3, 2) = DST(2, 3) = AVG3(F, G, H);
+ DST(3, 3) = AVG3(G, H, H);
+}
+
+static void VR4(uint8_t* dst, const uint8_t* top) {
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ DST(0, 0) = DST(1, 2) = AVG2(X, A);
+ DST(1, 0) = DST(2, 2) = AVG2(A, B);
+ DST(2, 0) = DST(3, 2) = AVG2(B, C);
+ DST(3, 0) = AVG2(C, D);
+
+ DST(0, 3) = AVG3(K, J, I);
+ DST(0, 2) = AVG3(J, I, X);
+ DST(0, 1) = DST(1, 3) = AVG3(I, X, A);
+ DST(1, 1) = DST(2, 3) = AVG3(X, A, B);
+ DST(2, 1) = DST(3, 3) = AVG3(A, B, C);
+ DST(3, 1) = AVG3(B, C, D);
+}
+
+static void VL4(uint8_t* dst, const uint8_t* top) {
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ const int E = top[4];
+ const int F = top[5];
+ const int G = top[6];
+ const int H = top[7];
+ DST(0, 0) = AVG2(A, B);
+ DST(1, 0) = DST(0, 2) = AVG2(B, C);
+ DST(2, 0) = DST(1, 2) = AVG2(C, D);
+ DST(3, 0) = DST(2, 2) = AVG2(D, E);
+
+ DST(0, 1) = AVG3(A, B, C);
+ DST(1, 1) = DST(0, 3) = AVG3(B, C, D);
+ DST(2, 1) = DST(1, 3) = AVG3(C, D, E);
+ DST(3, 1) = DST(2, 3) = AVG3(D, E, F);
+ DST(3, 2) = AVG3(E, F, G);
+ DST(3, 3) = AVG3(F, G, H);
+}
+
+static void HU4(uint8_t* dst, const uint8_t* top) {
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ DST(0, 0) = AVG2(I, J);
+ DST(2, 0) = DST(0, 1) = AVG2(J, K);
+ DST(2, 1) = DST(0, 2) = AVG2(K, L);
+ DST(1, 0) = AVG3(I, J, K);
+ DST(3, 0) = DST(1, 1) = AVG3(J, K, L);
+ DST(3, 1) = DST(1, 2) = AVG3(K, L, L);
+ DST(3, 2) = DST(2, 2) =
+ DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
+}
+
+static void HD4(uint8_t* dst, const uint8_t* top) {
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+
+ DST(0, 0) = DST(2, 1) = AVG2(I, X);
+ DST(0, 1) = DST(2, 2) = AVG2(J, I);
+ DST(0, 2) = DST(2, 3) = AVG2(K, J);
+ DST(0, 3) = AVG2(L, K);
+
+ DST(3, 0) = AVG3(A, B, C);
+ DST(2, 0) = AVG3(X, A, B);
+ DST(1, 0) = DST(3, 1) = AVG3(I, X, A);
+ DST(1, 1) = DST(3, 2) = AVG3(J, I, X);
+ DST(1, 2) = DST(3, 3) = AVG3(K, J, I);
+ DST(1, 3) = AVG3(L, K, J);
+}
+
+static void TM4(uint8_t* dst, const uint8_t* top) {
+ int x, y;
+ const uint8_t* const clip = clip1 + 255 - top[-1];
+ for (y = 0; y < 4; ++y) {
+ const uint8_t* const clip_table = clip + top[-2 - y];
+ for (x = 0; x < 4; ++x) {
+ dst[x] = clip_table[top[x]];
+ }
+ dst += BPS;
+ }
+}
+
+#undef DST
+#undef AVG3
+#undef AVG2
+
+// Left samples are at top[-5..-2], top_left is top[-1], top samples are
+// located at top[0..3], and top-right samples at top[4..7].
+static void Intra4Preds(uint8_t* dst, const uint8_t* top) {
+ DC4(I4DC4 + dst, top);
+ TM4(I4TM4 + dst, top);
+ VE4(I4VE4 + dst, top);
+ HE4(I4HE4 + dst, top);
+ RD4(I4RD4 + dst, top);
+ VR4(I4VR4 + dst, top);
+ LD4(I4LD4 + dst, top);
+ VL4(I4VL4 + dst, top);
+ HD4(I4HD4 + dst, top);
+ HU4(I4HU4 + dst, top);
+}
+
+//------------------------------------------------------------------------------
+// Metric
+
+static WEBP_INLINE int GetSSE(const uint8_t* a, const uint8_t* b,
+ int w, int h) {
+ int count = 0;
+ int y, x;
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ const int diff = (int)a[x] - b[x];
+ count += diff * diff;
+ }
+ a += BPS;
+ b += BPS;
+ }
+ return count;
+}
+
+static int SSE16x16(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 16, 16);
+}
+static int SSE16x8(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 16, 8);
+}
+static int SSE8x8(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 8, 8);
+}
+static int SSE4x4(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 4, 4);
+}
+
+//------------------------------------------------------------------------------
+// Texture distortion
+//
+// We try to match the spectral content (weighted) between source and
+// reconstructed samples.
+
+// Hadamard transform
+// Returns the weighted sum of the absolute value of transformed coefficients.
+static int TTransform(const uint8_t* in, const uint16_t* w) {
+ int sum = 0;
+ int tmp[16];
+ int i;
+ // horizontal pass
+ for (i = 0; i < 4; ++i, in += BPS) {
+ const int a0 = in[0] + in[2];
+ const int a1 = in[1] + in[3];
+ const int a2 = in[1] - in[3];
+ const int a3 = in[0] - in[2];
+ tmp[0 + i * 4] = a0 + a1;
+ tmp[1 + i * 4] = a3 + a2;
+ tmp[2 + i * 4] = a3 - a2;
+ tmp[3 + i * 4] = a0 - a1;
+ }
+ // vertical pass
+ for (i = 0; i < 4; ++i, ++w) {
+ const int a0 = tmp[0 + i] + tmp[8 + i];
+ const int a1 = tmp[4 + i] + tmp[12+ i];
+ const int a2 = tmp[4 + i] - tmp[12+ i];
+ const int a3 = tmp[0 + i] - tmp[8 + i];
+ const int b0 = a0 + a1;
+ const int b1 = a3 + a2;
+ const int b2 = a3 - a2;
+ const int b3 = a0 - a1;
+
+ sum += w[ 0] * abs(b0);
+ sum += w[ 4] * abs(b1);
+ sum += w[ 8] * abs(b2);
+ sum += w[12] * abs(b3);
+ }
+ return sum;
+}
+
+static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ const int sum1 = TTransform(a, w);
+ const int sum2 = TTransform(b, w);
+ return abs(sum2 - sum1) >> 5;
+}
+
+static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int D = 0;
+ int x, y;
+ for (y = 0; y < 16 * BPS; y += 4 * BPS) {
+ for (x = 0; x < 16; x += 4) {
+ D += Disto4x4(a + x + y, b + x + y, w);
+ }
+ }
+ return D;
+}
+
+//------------------------------------------------------------------------------
+// Quantization
+//
+
+static const uint8_t kZigzag[16] = {
+ 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
+};
+
+// Simple quantization
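+// QUANTDIV() (from vp8enci.h) approximates coeff / Q in fixed point using
+// the precomputed reciprocal iq_ and rounding bias bias_.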
+static int QuantizeBlock(int16_t in[16], int16_t out[16],
+ int n, const VP8Matrix* const mtx) {
+ int last = -1;
+ for (; n < 16; ++n) {
+ const int j = kZigzag[n];
+ const int sign = (in[j] < 0);
+ const int coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
+ if (coeff > mtx->zthresh_[j]) {
+ const int Q = mtx->q_[j];
+ const int iQ = mtx->iq_[j];
+ const int B = mtx->bias_[j];
+ out[n] = QUANTDIV(coeff, iQ, B);
+ if (out[n] > MAX_LEVEL) out[n] = MAX_LEVEL;
+ if (sign) out[n] = -out[n];
+ in[j] = out[n] * Q;
+ if (out[n]) last = n;
+ } else {
+ out[n] = 0;
+ in[j] = 0;
+ }
+ }
+ return (last >= 0);
+}
+
+static int QuantizeBlockWHT(int16_t in[16], int16_t out[16],
+ const VP8Matrix* const mtx) {
+ int n, last = -1;
+ for (n = 0; n < 16; ++n) {
+ const int j = kZigzag[n];
+ const int sign = (in[j] < 0);
+ const int coeff = sign ? -in[j] : in[j];
+ assert(mtx->sharpen_[j] == 0);
+ if (coeff > mtx->zthresh_[j]) {
+ const int Q = mtx->q_[j];
+ const int iQ = mtx->iq_[j];
+ const int B = mtx->bias_[j];
+ out[n] = QUANTDIV(coeff, iQ, B);
+ if (out[n] > MAX_LEVEL) out[n] = MAX_LEVEL;
+ if (sign) out[n] = -out[n];
+ in[j] = out[n] * Q;
+ if (out[n]) last = n;
+ } else {
+ out[n] = 0;
+ in[j] = 0;
+ }
+ }
+ return (last >= 0);
+}
+
+//------------------------------------------------------------------------------
+// Block copy
+
+static WEBP_INLINE void Copy(const uint8_t* src, uint8_t* dst, int size) {
+ int y;
+ for (y = 0; y < size; ++y) {
+ memcpy(dst, src, size);
+ src += BPS;
+ dst += BPS;
+ }
+}
+
+static void Copy4x4(const uint8_t* src, uint8_t* dst) { Copy(src, dst, 4); }
+
+//------------------------------------------------------------------------------
+// Initialization
+
+// Speed-critical function pointers. We have to initialize them to the default
+// implementations within VP8EncDspInit().
+VP8CHisto VP8CollectHistogram;
+VP8Idct VP8ITransform;
+VP8Fdct VP8FTransform;
+VP8WHT VP8ITransformWHT;
+VP8WHT VP8FTransformWHT;
+VP8Intra4Preds VP8EncPredLuma4;
+VP8IntraPreds VP8EncPredLuma16;
+VP8IntraPreds VP8EncPredChroma8;
+VP8Metric VP8SSE16x16;
+VP8Metric VP8SSE8x8;
+VP8Metric VP8SSE16x8;
+VP8Metric VP8SSE4x4;
+VP8WMetric VP8TDisto4x4;
+VP8WMetric VP8TDisto16x16;
+VP8QuantizeBlock VP8EncQuantizeBlock;
+VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT;
+VP8BlockCopy VP8Copy4x4;
+
+extern void VP8EncDspInitSSE2(void);
+extern void VP8EncDspInitNEON(void);
+
+void VP8EncDspInit(void) {
+ InitTables();
+
+ // default C implementations
+ VP8CollectHistogram = CollectHistogram;
+ VP8ITransform = ITransform;
+ VP8FTransform = FTransform;
+ VP8ITransformWHT = ITransformWHT;
+ VP8FTransformWHT = FTransformWHT;
+ VP8EncPredLuma4 = Intra4Preds;
+ VP8EncPredLuma16 = Intra16Preds;
+ VP8EncPredChroma8 = IntraChromaPreds;
+ VP8SSE16x16 = SSE16x16;
+ VP8SSE8x8 = SSE8x8;
+ VP8SSE16x8 = SSE16x8;
+ VP8SSE4x4 = SSE4x4;
+ VP8TDisto4x4 = Disto4x4;
+ VP8TDisto16x16 = Disto16x16;
+ VP8EncQuantizeBlock = QuantizeBlock;
+ VP8EncQuantizeBlockWHT = QuantizeBlockWHT;
+ VP8Copy4x4 = Copy4x4;
+
+  // If non-NULL, use VP8GetCPUInfo() to overwrite some pointers with faster
+  // versions.
+ if (VP8GetCPUInfo) {
+#if defined(WEBP_USE_SSE2)
+ if (VP8GetCPUInfo(kSSE2)) {
+ VP8EncDspInitSSE2();
+ }
+#elif defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ VP8EncDspInitNEON();
+ }
+#endif
+ }
+}
+
diff --git a/drivers/webp/dsp/enc_neon.c b/drivers/webp/dsp/enc_neon.c
new file mode 100644
index 000000000..52cca1868
--- /dev/null
+++ b/drivers/webp/dsp/enc_neon.c
@@ -0,0 +1,632 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of speed-critical encoding functions.
+//
+// adapted from libvpx (http://www.webmproject.org/code/)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include "../enc/vp8enci.h"
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Inverse transform.
+// This code is pretty much the same as TransformOneNEON in the decoder, except
+// for subtraction to *ref. See the comments there for algorithmic explanations.
+static void ITransformOne(const uint8_t* ref,
+ const int16_t* in, uint8_t* dst) {
+ const int kBPS = BPS;
+ const int16_t kC1C2[] = { 20091, 17734, 0, 0 }; // kC1 / (kC2 >> 1) / 0 / 0
+
+ __asm__ volatile (
+ "vld1.16 {q1, q2}, [%[in]] \n"
+ "vld1.16 {d0}, [%[kC1C2]] \n"
+
+ // d2: in[0]
+ // d3: in[8]
+ // d4: in[4]
+ // d5: in[12]
+ "vswp d3, d4 \n"
+
+ // q8 = {in[4], in[12]} * kC1 * 2 >> 16
+ // q9 = {in[4], in[12]} * kC2 >> 16
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ // d22 = a = in[0] + in[8]
+ // d23 = b = in[0] - in[8]
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ // q8 = in[4]/[12] * kC1 >> 16
+ "vshr.s16 q8, q8, #1 \n"
+
+ // Add {in[4], in[12]} back after the multiplication.
+ "vqadd.s16 q8, q2, q8 \n"
+
+ // d20 = c = in[4]*kC2 - in[12]*kC1
+ // d21 = d = in[4]*kC1 + in[12]*kC2
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ // d2 = tmp[0] = a + d
+ // d3 = tmp[1] = b + c
+ // d4 = tmp[2] = b - c
+ // d5 = tmp[3] = a - d
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ "vswp d3, d4 \n"
+
+ // q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
+ // q9 = {tmp[4], tmp[12]} * kC2 >> 16
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ // d22 = a = tmp[0] + tmp[8]
+ // d23 = b = tmp[0] - tmp[8]
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ "vshr.s16 q8, q8, #1 \n"
+ "vqadd.s16 q8, q2, q8 \n"
+
+    // d20 = c = tmp[4]*kC2 - tmp[12]*kC1
+    // d21 = d = tmp[4]*kC1 + tmp[12]*kC2
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ // d2 = tmp[0] = a + d
+ // d3 = tmp[1] = b + c
+ // d4 = tmp[2] = b - c
+ // d5 = tmp[3] = a - d
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vld1.32 d6[0], [%[ref]], %[kBPS] \n"
+ "vld1.32 d6[1], [%[ref]], %[kBPS] \n"
+ "vld1.32 d7[0], [%[ref]], %[kBPS] \n"
+ "vld1.32 d7[1], [%[ref]], %[kBPS] \n"
+
+ "sub %[ref], %[ref], %[kBPS], lsl #2 \n"
+
+ // (val) + 4 >> 3
+ "vrshr.s16 d2, d2, #3 \n"
+ "vrshr.s16 d3, d3, #3 \n"
+ "vrshr.s16 d4, d4, #3 \n"
+ "vrshr.s16 d5, d5, #3 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ // Must accumulate before saturating
+ "vmovl.u8 q8, d6 \n"
+ "vmovl.u8 q9, d7 \n"
+
+ "vqadd.s16 q1, q1, q8 \n"
+ "vqadd.s16 q2, q2, q9 \n"
+
+ "vqmovun.s16 d0, q1 \n"
+ "vqmovun.s16 d1, q2 \n"
+
+ "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[1], [%[dst]] \n"
+
+ : [in] "+r"(in), [dst] "+r"(dst) // modified registers
+ : [kBPS] "r"(kBPS), [kC1C2] "r"(kC1C2), [ref] "r"(ref) // constants
+ : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" // clobbered
+ );
+}
+
+static void ITransform(const uint8_t* ref,
+ const int16_t* in, uint8_t* dst, int do_two) {
+ ITransformOne(ref, in, dst);
+ if (do_two) {
+ ITransformOne(ref + 4, in + 16, dst + 4);
+ }
+}
+
+// Same code as dec_neon.c
+static void ITransformWHT(const int16_t* in, int16_t* out) {
+  const int kStep = 32; // Each vst1.16 below stores a single int16_t but
+                        // advances the pointer by kStep bytes (32 bytes ==
+                        // 16 int16_t), giving the out[0], out[16], ... layout.
+ __asm__ volatile (
+ // part 1
+ // load data into q0, q1
+ "vld1.16 {q0, q1}, [%[in]] \n"
+
+ "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
+ "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
+ "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
+ "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
+
+ "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
+ "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
+ "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
+ "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
+
+ // Transpose
+ // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
+ // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
+ "vswp d1, d4 \n" // vtrn.64 q0, q2
+ "vswp d3, d6 \n" // vtrn.64 q1, q3
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q2, q3 \n"
+
+ "vmov.s32 q4, #3 \n" // dc = 3
+ "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
+ "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
+ "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
+ "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
+ "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
+
+ "vadd.s32 q0, q6, q7 \n"
+ "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
+ "vadd.s32 q1, q9, q8 \n"
+ "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
+ "vsub.s32 q2, q6, q7 \n"
+ "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
+ "vsub.s32 q3, q9, q8 \n"
+ "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
+
+ // set the results to output
+ "vst1.16 d0[0], [%[out]], %[kStep] \n"
+ "vst1.16 d1[0], [%[out]], %[kStep] \n"
+ "vst1.16 d2[0], [%[out]], %[kStep] \n"
+ "vst1.16 d3[0], [%[out]], %[kStep] \n"
+ "vst1.16 d0[1], [%[out]], %[kStep] \n"
+ "vst1.16 d1[1], [%[out]], %[kStep] \n"
+ "vst1.16 d2[1], [%[out]], %[kStep] \n"
+ "vst1.16 d3[1], [%[out]], %[kStep] \n"
+ "vst1.16 d0[2], [%[out]], %[kStep] \n"
+ "vst1.16 d1[2], [%[out]], %[kStep] \n"
+ "vst1.16 d2[2], [%[out]], %[kStep] \n"
+ "vst1.16 d3[2], [%[out]], %[kStep] \n"
+ "vst1.16 d0[3], [%[out]], %[kStep] \n"
+ "vst1.16 d1[3], [%[out]], %[kStep] \n"
+ "vst1.16 d2[3], [%[out]], %[kStep] \n"
+ "vst1.16 d3[3], [%[out]], %[kStep] \n"
+
+ : [out] "+r"(out) // modified registers
+ : [in] "r"(in), [kStep] "r"(kStep) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4",
+ "q5", "q6", "q7", "q8", "q9" // clobbered
+ );
+}
+
+// Forward transform.
+
+// adapted from vp8/encoder/arm/neon/shortfdct_neon.asm
+static const int16_t kCoeff16[] = {
+ 5352, 5352, 5352, 5352, 2217, 2217, 2217, 2217
+};
+static const int32_t kCoeff32[] = {
+ 1812, 1812, 1812, 1812,
+ 937, 937, 937, 937,
+ 12000, 12000, 12000, 12000,
+ 51000, 51000, 51000, 51000
+};
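+// (these are the same rounding constants, 1812/937/12000/51000, as in the C
+//  FTransform in enc.c)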
+
+static void FTransform(const uint8_t* src, const uint8_t* ref,
+ int16_t* out) {
+ const int kBPS = BPS;
+ const uint8_t* src_ptr = src;
+ const uint8_t* ref_ptr = ref;
+ const int16_t* coeff16 = kCoeff16;
+ const int32_t* coeff32 = kCoeff32;
+
+ __asm__ volatile (
+ // load src into q4, q5 in high half
+ "vld1.8 {d8}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d10}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d9}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d11}, [%[src_ptr]] \n"
+
+ // load ref into q6, q7 in high half
+ "vld1.8 {d12}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d14}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d13}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d15}, [%[ref_ptr]] \n"
+
+    // Pack the high values into q4 and q6
+ "vtrn.32 q4, q5 \n"
+ "vtrn.32 q6, q7 \n"
+
+ // d[0-3] = src - ref
+ "vsubl.u8 q0, d8, d12 \n"
+ "vsubl.u8 q1, d9, d13 \n"
+
+ // load coeff16 into q8(d16=5352, d17=2217)
+ "vld1.16 {q8}, [%[coeff16]] \n"
+
+ // load coeff32 high half into q9 = 1812, q10 = 937
+ "vld1.32 {q9, q10}, [%[coeff32]]! \n"
+
+ // load coeff32 low half into q11=12000, q12=51000
+ "vld1.32 {q11,q12}, [%[coeff32]] \n"
+
+ // part 1
+ // Transpose. Register dN is the same as dN in C
+ "vtrn.32 d0, d2 \n"
+ "vtrn.32 d1, d3 \n"
+ "vtrn.16 d0, d1 \n"
+ "vtrn.16 d2, d3 \n"
+
+ "vadd.s16 d4, d0, d3 \n" // a0 = d0 + d3
+ "vadd.s16 d5, d1, d2 \n" // a1 = d1 + d2
+ "vsub.s16 d6, d1, d2 \n" // a2 = d1 - d2
+ "vsub.s16 d7, d0, d3 \n" // a3 = d0 - d3
+
+ "vadd.s16 d0, d4, d5 \n" // a0 + a1
+ "vshl.s16 d0, d0, #3 \n" // temp[0+i*4] = (a0+a1) << 3
+ "vsub.s16 d2, d4, d5 \n" // a0 - a1
+ "vshl.s16 d2, d2, #3 \n" // (temp[2+i*4] = (a0-a1) << 3
+
+ "vmlal.s16 q9, d7, d16 \n" // a3*5352 + 1812
+ "vmlal.s16 q10, d7, d17 \n" // a3*2217 + 937
+ "vmlal.s16 q9, d6, d17 \n" // a2*2217 + a3*5352 + 1812
+ "vmlsl.s16 q10, d6, d16 \n" // a3*2217 + 937 - a2*5352
+
+ // temp[1+i*4] = (d2*2217 + d3*5352 + 1812) >> 9
+ // temp[3+i*4] = (d3*2217 + 937 - d2*5352) >> 9
+ "vshrn.s32 d1, q9, #9 \n"
+ "vshrn.s32 d3, q10, #9 \n"
+
+ // part 2
+ // transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
+ "vtrn.32 d0, d2 \n"
+ "vtrn.32 d1, d3 \n"
+ "vtrn.16 d0, d1 \n"
+ "vtrn.16 d2, d3 \n"
+
+ "vmov.s16 d26, #7 \n"
+
+ "vadd.s16 d4, d0, d3 \n" // a1 = ip[0] + ip[12]
+ "vadd.s16 d5, d1, d2 \n" // b1 = ip[4] + ip[8]
+ "vsub.s16 d6, d1, d2 \n" // c1 = ip[4] - ip[8]
+ "vadd.s16 d4, d4, d26 \n" // a1 + 7
+ "vsub.s16 d7, d0, d3 \n" // d1 = ip[0] - ip[12]
+
+ "vadd.s16 d0, d4, d5 \n" // op[0] = a1 + b1 + 7
+ "vsub.s16 d2, d4, d5 \n" // op[8] = a1 - b1 + 7
+
+ "vmlal.s16 q11, d7, d16 \n" // d1*5352 + 12000
+ "vmlal.s16 q12, d7, d17 \n" // d1*2217 + 51000
+
+ "vceq.s16 d4, d7, #0 \n"
+
+ "vshr.s16 d0, d0, #4 \n"
+ "vshr.s16 d2, d2, #4 \n"
+
+ "vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000
+ "vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000
+
+ "vmvn d4, d4 \n" // !(d1 == 0)
+ // op[4] = (c1*2217 + d1*5352 + 12000)>>16
+ "vshrn.s32 d1, q11, #16 \n"
+ // op[4] += (d1!=0)
+ "vsub.s16 d1, d1, d4 \n"
+ // op[12]= (d1*2217 - c1*5352 + 51000)>>16
+ "vshrn.s32 d3, q12, #16 \n"
+
+ // set result to out array
+ "vst1.16 {q0, q1}, [%[out]] \n"
+ : [src_ptr] "+r"(src_ptr), [ref_ptr] "+r"(ref_ptr),
+ [coeff32] "+r"(coeff32) // modified registers
+ : [kBPS] "r"(kBPS), [coeff16] "r"(coeff16),
+ [out] "r"(out) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13" // clobbered
+ );
+}
+
+static void FTransformWHT(const int16_t* in, int16_t* out) {
+ const int kStep = 32;
+ __asm__ volatile (
+ // d0 = in[0 * 16] , d1 = in[1 * 16]
+ // d2 = in[2 * 16] , d3 = in[3 * 16]
+ "vld1.16 d0[0], [%[in]], %[kStep] \n"
+ "vld1.16 d1[0], [%[in]], %[kStep] \n"
+ "vld1.16 d2[0], [%[in]], %[kStep] \n"
+ "vld1.16 d3[0], [%[in]], %[kStep] \n"
+ "vld1.16 d0[1], [%[in]], %[kStep] \n"
+ "vld1.16 d1[1], [%[in]], %[kStep] \n"
+ "vld1.16 d2[1], [%[in]], %[kStep] \n"
+ "vld1.16 d3[1], [%[in]], %[kStep] \n"
+ "vld1.16 d0[2], [%[in]], %[kStep] \n"
+ "vld1.16 d1[2], [%[in]], %[kStep] \n"
+ "vld1.16 d2[2], [%[in]], %[kStep] \n"
+ "vld1.16 d3[2], [%[in]], %[kStep] \n"
+ "vld1.16 d0[3], [%[in]], %[kStep] \n"
+ "vld1.16 d1[3], [%[in]], %[kStep] \n"
+ "vld1.16 d2[3], [%[in]], %[kStep] \n"
+ "vld1.16 d3[3], [%[in]], %[kStep] \n"
+
+ "vaddl.s16 q2, d0, d2 \n" // a0=(in[0*16]+in[2*16])
+ "vaddl.s16 q3, d1, d3 \n" // a1=(in[1*16]+in[3*16])
+ "vsubl.s16 q4, d1, d3 \n" // a2=(in[1*16]-in[3*16])
+ "vsubl.s16 q5, d0, d2 \n" // a3=(in[0*16]-in[2*16])
+
+ "vqadd.s32 q6, q2, q3 \n" // a0 + a1
+ "vqadd.s32 q7, q5, q4 \n" // a3 + a2
+ "vqsub.s32 q8, q5, q4 \n" // a3 - a2
+ "vqsub.s32 q9, q2, q3 \n" // a0 - a1
+
+ // Transpose
+ // q6 = tmp[0, 1, 2, 3] ; q7 = tmp[ 4, 5, 6, 7]
+ // q8 = tmp[8, 9, 10, 11] ; q9 = tmp[12, 13, 14, 15]
+ "vswp d13, d16 \n" // vtrn.64 q0, q2
+ "vswp d15, d18 \n" // vtrn.64 q1, q3
+ "vtrn.32 q6, q7 \n"
+ "vtrn.32 q8, q9 \n"
+
+ "vqadd.s32 q0, q6, q8 \n" // a0 = tmp[0] + tmp[8]
+ "vqadd.s32 q1, q7, q9 \n" // a1 = tmp[4] + tmp[12]
+ "vqsub.s32 q2, q7, q9 \n" // a2 = tmp[4] - tmp[12]
+ "vqsub.s32 q3, q6, q8 \n" // a3 = tmp[0] - tmp[8]
+
+ "vqadd.s32 q4, q0, q1 \n" // b0 = a0 + a1
+ "vqadd.s32 q5, q3, q2 \n" // b1 = a3 + a2
+ "vqsub.s32 q6, q3, q2 \n" // b2 = a3 - a2
+ "vqsub.s32 q7, q0, q1 \n" // b3 = a0 - a1
+
+ "vshrn.s32 d18, q4, #1 \n" // b0 >> 1
+ "vshrn.s32 d19, q5, #1 \n" // b1 >> 1
+ "vshrn.s32 d20, q6, #1 \n" // b2 >> 1
+ "vshrn.s32 d21, q7, #1 \n" // b3 >> 1
+
+ "vst1.16 {q9, q10}, [%[out]] \n"
+
+ : [in] "+r"(in)
+ : [kStep] "r"(kStep), [out] "r"(out)
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5",
+ "q6", "q7", "q8", "q9", "q10" // clobbered
+ );
+}
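+
+// Scalar equivalent of the WHT above, written out from the asm comments
+// (inputs are DC coefficients spaced 16 values apart). Illustrative only;
+// the name is not part of the original file.
+#if 0
+static void FTransformWHTRef(const int16_t* in, int16_t* out) {
+  int i, tmp[16];
+  for (i = 0; i < 4; ++i, in += 64) {
+    const int a0 = in[0 * 16] + in[2 * 16];
+    const int a1 = in[1 * 16] + in[3 * 16];
+    const int a2 = in[1 * 16] - in[3 * 16];
+    const int a3 = in[0 * 16] - in[2 * 16];
+    tmp[0 + i * 4] = a0 + a1;
+    tmp[1 + i * 4] = a3 + a2;
+    tmp[2 + i * 4] = a3 - a2;
+    tmp[3 + i * 4] = a0 - a1;
+  }
+  for (i = 0; i < 4; ++i) {
+    const int a0 = tmp[0 + i] + tmp[8 + i];
+    const int a1 = tmp[4 + i] + tmp[12 + i];
+    const int a2 = tmp[4 + i] - tmp[12 + i];
+    const int a3 = tmp[0 + i] - tmp[8 + i];
+    out[0 + i] = (a0 + a1) >> 1;
+    out[4 + i] = (a3 + a2) >> 1;
+    out[8 + i] = (a3 - a2) >> 1;
+    out[12 + i] = (a0 - a1) >> 1;
+  }
+}
+#endif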
+
+//------------------------------------------------------------------------------
+// Texture distortion
+//
+// We try to match the spectral content (weighted) between source and
+// reconstructed samples.
+
+// Hadamard transform
+// Returns the difference between the weighted sums of the absolute values of
+// the transformed coefficients of the two inputs. (The reference C version
+// uses a TTransform helper; here the whole transform is done in assembly.)
+static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ const int kBPS = BPS;
+ const uint8_t* A = a;
+ const uint8_t* B = b;
+ const uint16_t* W = w;
+ int sum;
+ __asm__ volatile (
+ "vld1.32 d0[0], [%[a]], %[kBPS] \n"
+ "vld1.32 d0[1], [%[a]], %[kBPS] \n"
+ "vld1.32 d2[0], [%[a]], %[kBPS] \n"
+ "vld1.32 d2[1], [%[a]] \n"
+
+ "vld1.32 d1[0], [%[b]], %[kBPS] \n"
+ "vld1.32 d1[1], [%[b]], %[kBPS] \n"
+ "vld1.32 d3[0], [%[b]], %[kBPS] \n"
+ "vld1.32 d3[1], [%[b]] \n"
+
+ // a d0/d2, b d1/d3
+ // d0/d1: 01 01 01 01
+ // d2/d3: 23 23 23 23
+ // But: it goes 01 45 23 67
+ // Notice the middle values are transposed
+ "vtrn.16 q0, q1 \n"
+
+ // {a0, a1} = {in[0] + in[2], in[1] + in[3]}
+ "vaddl.u8 q2, d0, d2 \n"
+ "vaddl.u8 q10, d1, d3 \n"
+ // {a3, a2} = {in[0] - in[2], in[1] - in[3]}
+ "vsubl.u8 q3, d0, d2 \n"
+ "vsubl.u8 q11, d1, d3 \n"
+
+ // tmp[0] = a0 + a1
+ "vpaddl.s16 q0, q2 \n"
+ "vpaddl.s16 q8, q10 \n"
+
+ // tmp[1] = a3 + a2
+ "vpaddl.s16 q1, q3 \n"
+ "vpaddl.s16 q9, q11 \n"
+
+ // There is no pairwise subtract, so transpose to regroup the values:
+ // q2 = {a0, a3}
+ // q3 = {a1, a2}
+ "vtrn.16 q2, q3 \n"
+ "vtrn.16 q10, q11 \n"
+
+ // {tmp[3], tmp[2]} = {a0 - a1, a3 - a2}
+ "vsubl.s16 q12, d4, d6 \n"
+ "vsubl.s16 q13, d5, d7 \n"
+ "vsubl.s16 q14, d20, d22 \n"
+ "vsubl.s16 q15, d21, d23 \n"
+
+ // separate tmp[3] and tmp[2]
+ // q12 = tmp[3]
+ // q13 = tmp[2]
+ "vtrn.32 q12, q13 \n"
+ "vtrn.32 q14, q15 \n"
+
+ // Transpose tmp for a
+ "vswp d1, d26 \n" // vtrn.64
+ "vswp d3, d24 \n" // vtrn.64
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q13, q12 \n"
+
+ // Transpose tmp for b
+ "vswp d17, d30 \n" // vtrn.64
+ "vswp d19, d28 \n" // vtrn.64
+ "vtrn.32 q8, q9 \n"
+ "vtrn.32 q15, q14 \n"
+
+ // The first Q register is a, the second b.
+ // q0/8 tmp[0-3]
+ // q13/15 tmp[4-7]
+ // q1/9 tmp[8-11]
+ // q12/14 tmp[12-15]
+
+ // These are still in 01 45 23 67 order. We fix it easily in the addition
+ // case but the subtraction propagates them.
+ "vswp d3, d27 \n"
+ "vswp d19, d31 \n"
+
+ // a0 = tmp[0] + tmp[8]
+ "vadd.s32 q2, q0, q1 \n"
+ "vadd.s32 q3, q8, q9 \n"
+
+ // a1 = tmp[4] + tmp[12]
+ "vadd.s32 q10, q13, q12 \n"
+ "vadd.s32 q11, q15, q14 \n"
+
+ // a2 = tmp[4] - tmp[12]
+ "vsub.s32 q13, q13, q12 \n"
+ "vsub.s32 q15, q15, q14 \n"
+
+ // a3 = tmp[0] - tmp[8]
+ "vsub.s32 q0, q0, q1 \n"
+ "vsub.s32 q8, q8, q9 \n"
+
+ // b0 = a0 + a1
+ "vadd.s32 q1, q2, q10 \n"
+ "vadd.s32 q9, q3, q11 \n"
+
+ // b1 = a3 + a2
+ "vadd.s32 q12, q0, q13 \n"
+ "vadd.s32 q14, q8, q15 \n"
+
+ // b2 = a3 - a2
+ "vsub.s32 q0, q0, q13 \n"
+ "vsub.s32 q8, q8, q15 \n"
+
+ // b3 = a0 - a1
+ "vsub.s32 q2, q2, q10 \n"
+ "vsub.s32 q3, q3, q11 \n"
+
+ "vld1.64 {q10, q11}, [%[w]] \n"
+
+ // abs(b0)
+ "vabs.s32 q1, q1 \n"
+ "vabs.s32 q9, q9 \n"
+ // abs(b1)
+ "vabs.s32 q12, q12 \n"
+ "vabs.s32 q14, q14 \n"
+ // abs(b2)
+ "vabs.s32 q0, q0 \n"
+ "vabs.s32 q8, q8 \n"
+ // abs(b3)
+ "vabs.s32 q2, q2 \n"
+ "vabs.s32 q3, q3 \n"
+
+ // expand w before using.
+ "vmovl.u16 q13, d20 \n"
+ "vmovl.u16 q15, d21 \n"
+
+ // w[0] * abs(b0)
+ "vmul.u32 q1, q1, q13 \n"
+ "vmul.u32 q9, q9, q13 \n"
+
+ // w[4] * abs(b1)
+ "vmla.u32 q1, q12, q15 \n"
+ "vmla.u32 q9, q14, q15 \n"
+
+ // expand w before using.
+ "vmovl.u16 q13, d22 \n"
+ "vmovl.u16 q15, d23 \n"
+
+ // w[8] * abs(b2)
+ "vmla.u32 q1, q0, q13 \n"
+ "vmla.u32 q9, q8, q13 \n"
+
+ // w[12] * abs(b3)
+ "vmla.u32 q1, q2, q15 \n"
+ "vmla.u32 q9, q3, q15 \n"
+
+ // Sum the arrays
+ "vpaddl.u32 q1, q1 \n"
+ "vpaddl.u32 q9, q9 \n"
+ "vadd.u64 d2, d3 \n"
+ "vadd.u64 d18, d19 \n"
+
+ // Hadamard transform needs 4 bits of extra precision (2 bits in each
+ // direction) for dynamic range. Weights w[] are 16 bits at most, so the maximum
+ // precision for coeff is 8bit of input + 4bits of Hadamard transform +
+ // 16bits for w[] + 2 bits of abs() summation.
+ //
+ // This uses a maximum of 31 bits (signed). Discarding the top 32 bits is
+ // A-OK.
+
+ // sum2 - sum1
+ "vsub.u32 d0, d2, d18 \n"
+ // abs(sum2 - sum1)
+ "vabs.s32 d0, d0 \n"
+ // abs(sum2 - sum1) >> 5
+ "vshr.u32 d0, #5 \n"
+
+ // It would be better to move the value straight into r0 but I'm not
+ // entirely sure how this works with inline assembly.
+ "vmov.32 %[sum], d0[0] \n"
+
+ : [sum] "=r"(sum), [a] "+r"(A), [b] "+r"(B), [w] "+r"(W)
+ : [kBPS] "r"(kBPS)
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13", "q14", "q15" // clobbered
+ );
+
+ return sum;
+}
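+
+// Scalar sketch of the metric above, following the asm comments: Hadamard-
+// transform each block, weight the absolute coefficients, and return the
+// absolute difference of the two sums, scaled down by 5 bits. Illustrative
+// only (uses abs() from <stdlib.h>); the name is not part of the file.
+#if 0
+static int TTransformRef(const uint8_t* in, const uint16_t* w) {
+  int i, sum = 0;
+  int tmp[16];
+  for (i = 0; i < 4; ++i, in += BPS) {   // horizontal pass
+    const int a0 = in[0] + in[2], a1 = in[1] + in[3];
+    const int a2 = in[1] - in[3], a3 = in[0] - in[2];
+    tmp[0 + i * 4] = a0 + a1;
+    tmp[1 + i * 4] = a3 + a2;
+    tmp[2 + i * 4] = a3 - a2;
+    tmp[3 + i * 4] = a0 - a1;
+  }
+  for (i = 0; i < 4; ++i, ++w) {         // vertical pass + weighting
+    const int a0 = tmp[0 + i] + tmp[8 + i], a1 = tmp[4 + i] + tmp[12 + i];
+    const int a2 = tmp[4 + i] - tmp[12 + i], a3 = tmp[0 + i] - tmp[8 + i];
+    sum += w[0] * abs(a0 + a1) + w[4] * abs(a3 + a2)
+         + w[8] * abs(a3 - a2) + w[12] * abs(a0 - a1);
+  }
+  return sum;
+}
+// Disto4x4 is then abs(TTransformRef(a, w) - TTransformRef(b, w)) >> 5.
+#endif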
+
+static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int D = 0;
+ int x, y;
+ for (y = 0; y < 16 * BPS; y += 4 * BPS) {
+ for (x = 0; x < 16; x += 4) {
+ D += Disto4x4(a + x + y, b + x + y, w);
+ }
+ }
+ return D;
+}
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitNEON(void);
+
+void VP8EncDspInitNEON(void) {
+#if defined(WEBP_USE_NEON)
+ VP8ITransform = ITransform;
+ VP8FTransform = FTransform;
+
+ VP8ITransformWHT = ITransformWHT;
+ VP8FTransformWHT = FTransformWHT;
+
+ VP8TDisto4x4 = Disto4x4;
+ VP8TDisto16x16 = Disto16x16;
+#endif // WEBP_USE_NEON
+}
+
diff --git a/drivers/webp/dsp/enc_sse2.c b/drivers/webp/dsp/enc_sse2.c
new file mode 100644
index 000000000..540a3cb2d
--- /dev/null
+++ b/drivers/webp/dsp/enc_sse2.c
@@ -0,0 +1,957 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 version of speed-critical encoding functions.
+//
+// Author: Christian Duvivier (cduvivier@google.com)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+#include <stdlib.h> // for abs()
+#include <emmintrin.h>
+
+#include "../enc/vp8enci.h"
+
+//------------------------------------------------------------------------------
+// Quite useful macro for debugging. Left here for convenience.
+
+#if 0
+#include <stdio.h>
+static void PrintReg(const __m128i r, const char* const name, int size) {
+ int n;
+ union {
+ __m128i r;
+ uint8_t i8[16];
+ uint16_t i16[8];
+ uint32_t i32[4];
+ uint64_t i64[2];
+ } tmp;
+ tmp.r = r;
+ printf("%s\t: ", name);
+ if (size == 8) {
+ for (n = 0; n < 16; ++n) printf("%.2x ", tmp.i8[n]);
+ } else if (size == 16) {
+ for (n = 0; n < 8; ++n) printf("%.4x ", tmp.i16[n]);
+ } else if (size == 32) {
+ for (n = 0; n < 4; ++n) printf("%.8x ", tmp.i32[n]);
+ } else {
+ for (n = 0; n < 2; ++n) printf("%.16lx ", tmp.i64[n]);
+ }
+ printf("\n");
+}
+#endif
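+// (Typical use, with the block above enabled: PrintReg(coeff0, "coeff0", 16);)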
+
+//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
+static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ VP8Histogram* const histo) {
+ const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
+ int j;
+ for (j = start_block; j < end_block; ++j) {
+ int16_t out[16];
+ int k;
+
+ VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
+
+ // Convert coefficients to bin (within out[]).
+ {
+ // Load.
+ const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
+ const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
+ // sign(out) = out >> 15 (0x0000 if positive, 0xffff if negative)
+ const __m128i sign0 = _mm_srai_epi16(out0, 15);
+ const __m128i sign1 = _mm_srai_epi16(out1, 15);
+ // abs(out) = (out ^ sign) - sign
+ const __m128i xor0 = _mm_xor_si128(out0, sign0);
+ const __m128i xor1 = _mm_xor_si128(out1, sign1);
+ const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
+ const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
+ // v = abs(out) >> 3
+ const __m128i v0 = _mm_srai_epi16(abs0, 3);
+ const __m128i v1 = _mm_srai_epi16(abs1, 3);
+ // bin = min(v, MAX_COEFF_THRESH)
+ const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
+ const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
+ // Store.
+ _mm_storeu_si128((__m128i*)&out[0], bin0);
+ _mm_storeu_si128((__m128i*)&out[8], bin1);
+ }
+
+ // Use the bins to update the histogram.
+ for (k = 0; k < 16; ++k) {
+ histo->distribution[out[k]]++;
+ }
+ }
+}
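+
+// Scalar equivalent of the binning above: bin = min(abs(coeff) >> 3,
+// MAX_COEFF_THRESH). The (out ^ sign) - sign step is a branchless abs();
+// e.g. out = -20: sign = 0xffff, (out ^ sign) - sign = 19 - (-1) = 20.
+#if 0
+  // inner loop, written out in scalar form (illustrative):
+  for (k = 0; k < 16; ++k) {
+    const int v = abs(out[k]) >> 3;
+    histo->distribution[(v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v]++;
+  }
+#endif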
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Does one or two inverse transforms.
+static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
+ int do_two) {
+ // This implementation makes use of 16-bit fixed point versions of two
+ // multiply constants:
+ // K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
+ // K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
+ //
+ // To be able to use signed 16-bit integers, we use the following trick to
+ // have constants within range:
+ // - Associated constants are obtained by subtracting the 16-bit fixed point
+ // version of one:
+ // k = K - (1 << 16) => K = k + (1 << 16)
+ // K1 = 85627 => k1 = 20091
+ // K2 = 35468 => k2 = -30068
+ // - The multiplication of a variable by a constant becomes the sum of the
+ // variable and the multiplication of that variable by the associated
+ // constant:
+ // (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
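+ // e.g. for x = 100: (100 * 85627) >> 16 = 130, and
+ // ((100 * 20091) >> 16) + 100 = 30 + 100 = 130.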
+ const __m128i k1 = _mm_set1_epi16(20091);
+ const __m128i k2 = _mm_set1_epi16(-30068);
+ __m128i T0, T1, T2, T3;
+
+ // Load and concatenate the transform coefficients (we'll do two inverse
+ // transforms in parallel). In the case of only one inverse transform, the
+ // second half of the vectors will just contain random values we'll never
+ // use nor store.
+ __m128i in0, in1, in2, in3;
+ {
+ in0 = _mm_loadl_epi64((__m128i*)&in[0]);
+ in1 = _mm_loadl_epi64((__m128i*)&in[4]);
+ in2 = _mm_loadl_epi64((__m128i*)&in[8]);
+ in3 = _mm_loadl_epi64((__m128i*)&in[12]);
+ // a00 a10 a20 a30 x x x x
+ // a01 a11 a21 a31 x x x x
+ // a02 a12 a22 a32 x x x x
+ // a03 a13 a23 a33 x x x x
+ if (do_two) {
+ const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
+ const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
+ const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
+ const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
+ in0 = _mm_unpacklo_epi64(in0, inB0);
+ in1 = _mm_unpacklo_epi64(in1, inB1);
+ in2 = _mm_unpacklo_epi64(in2, inB2);
+ in3 = _mm_unpacklo_epi64(in3, inB3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+ }
+
+ // Vertical pass and subsequent transpose.
+ {
+ // First pass, c and d calculations are longer because of the "trick"
+ // multiplications.
+ const __m128i a = _mm_add_epi16(in0, in2);
+ const __m128i b = _mm_sub_epi16(in0, in2);
+ // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
+ const __m128i c1 = _mm_mulhi_epi16(in1, k2);
+ const __m128i c2 = _mm_mulhi_epi16(in3, k1);
+ const __m128i c3 = _mm_sub_epi16(in1, in3);
+ const __m128i c4 = _mm_sub_epi16(c1, c2);
+ const __m128i c = _mm_add_epi16(c3, c4);
+ // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
+ const __m128i d1 = _mm_mulhi_epi16(in1, k1);
+ const __m128i d2 = _mm_mulhi_epi16(in3, k2);
+ const __m128i d3 = _mm_add_epi16(in1, in3);
+ const __m128i d4 = _mm_add_epi16(d1, d2);
+ const __m128i d = _mm_add_epi16(d3, d4);
+
+ // Second pass.
+ const __m128i tmp0 = _mm_add_epi16(a, d);
+ const __m128i tmp1 = _mm_add_epi16(b, c);
+ const __m128i tmp2 = _mm_sub_epi16(b, c);
+ const __m128i tmp3 = _mm_sub_epi16(a, d);
+
+ // Transpose the two 4x4.
+ // a00 a01 a02 a03 b00 b01 b02 b03
+ // a10 a11 a12 a13 b10 b11 b12 b13
+ // a20 a21 a22 a23 b20 b21 b22 b23
+ // a30 a31 a32 a33 b30 b31 b32 b33
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
+ const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
+ const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
+ // a00 a10 a01 a11 a02 a12 a03 a13
+ // a20 a30 a21 a31 a22 a32 a23 a33
+ // b00 b10 b01 b11 b02 b12 b03 b13
+ // b20 b30 b21 b31 b22 b32 b23 b33
+ const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
+ const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // b00 b10 b20 b30 b01 b11 b21 b31
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // b02 b12 b22 b32 b03 b13 b23 b33
+ T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
+ T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
+ T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
+ T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+
+ // Horizontal pass and subsequent transpose.
+ {
+ // First pass, c and d calculations are longer because of the "trick"
+ // multiplications.
+ const __m128i four = _mm_set1_epi16(4);
+ const __m128i dc = _mm_add_epi16(T0, four);
+ const __m128i a = _mm_add_epi16(dc, T2);
+ const __m128i b = _mm_sub_epi16(dc, T2);
+ // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
+ const __m128i c1 = _mm_mulhi_epi16(T1, k2);
+ const __m128i c2 = _mm_mulhi_epi16(T3, k1);
+ const __m128i c3 = _mm_sub_epi16(T1, T3);
+ const __m128i c4 = _mm_sub_epi16(c1, c2);
+ const __m128i c = _mm_add_epi16(c3, c4);
+ // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
+ const __m128i d1 = _mm_mulhi_epi16(T1, k1);
+ const __m128i d2 = _mm_mulhi_epi16(T3, k2);
+ const __m128i d3 = _mm_add_epi16(T1, T3);
+ const __m128i d4 = _mm_add_epi16(d1, d2);
+ const __m128i d = _mm_add_epi16(d3, d4);
+
+ // Second pass.
+ const __m128i tmp0 = _mm_add_epi16(a, d);
+ const __m128i tmp1 = _mm_add_epi16(b, c);
+ const __m128i tmp2 = _mm_sub_epi16(b, c);
+ const __m128i tmp3 = _mm_sub_epi16(a, d);
+ const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
+ const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
+ const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
+ const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);
+
+ // Transpose the two 4x4.
+ // a00 a01 a02 a03 b00 b01 b02 b03
+ // a10 a11 a12 a13 b10 b11 b12 b13
+ // a20 a21 a22 a23 b20 b21 b22 b23
+ // a30 a31 a32 a33 b30 b31 b32 b33
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
+ const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
+ const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
+ // a00 a10 a01 a11 a02 a12 a03 a13
+ // a20 a30 a21 a31 a22 a32 a23 a33
+ // b00 b10 b01 b11 b02 b12 b03 b13
+ // b20 b30 b21 b31 b22 b32 b23 b33
+ const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
+ const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // b00 b10 b20 b30 b01 b11 b21 b31
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // b02 b12 b22 b32 b03 b13 b23 b33
+ T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
+ T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
+ T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
+ T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+
+ // Add inverse transform to 'ref' and store.
+ {
+ const __m128i zero = _mm_setzero_si128();
+ // Load the reference(s).
+ __m128i ref0, ref1, ref2, ref3;
+ if (do_two) {
+ // Load eight bytes/pixels per line.
+ ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]);
+ ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]);
+ ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]);
+ ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]);
+ } else {
+ // Load four bytes/pixels per line.
+ ref0 = _mm_cvtsi32_si128(*(int*)&ref[0 * BPS]);
+ ref1 = _mm_cvtsi32_si128(*(int*)&ref[1 * BPS]);
+ ref2 = _mm_cvtsi32_si128(*(int*)&ref[2 * BPS]);
+ ref3 = _mm_cvtsi32_si128(*(int*)&ref[3 * BPS]);
+ }
+ // Convert to 16b.
+ ref0 = _mm_unpacklo_epi8(ref0, zero);
+ ref1 = _mm_unpacklo_epi8(ref1, zero);
+ ref2 = _mm_unpacklo_epi8(ref2, zero);
+ ref3 = _mm_unpacklo_epi8(ref3, zero);
+ // Add the inverse transform(s).
+ ref0 = _mm_add_epi16(ref0, T0);
+ ref1 = _mm_add_epi16(ref1, T1);
+ ref2 = _mm_add_epi16(ref2, T2);
+ ref3 = _mm_add_epi16(ref3, T3);
+ // Unsigned saturate to 8b.
+ ref0 = _mm_packus_epi16(ref0, ref0);
+ ref1 = _mm_packus_epi16(ref1, ref1);
+ ref2 = _mm_packus_epi16(ref2, ref2);
+ ref3 = _mm_packus_epi16(ref3, ref3);
+ // Store the results.
+ if (do_two) {
+ // Store eight bytes/pixels per line.
+ _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0);
+ _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1);
+ _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2);
+ _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3);
+ } else {
+ // Store four bytes/pixels per line.
+ *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(ref0);
+ *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(ref1);
+ *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(ref2);
+ *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(ref3);
+ }
+ }
+}
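+
+// Scalar sketch of a single inverse transform, following the pass structure
+// and constants above (MUL applies K1/K2 exactly rather than via the 16-bit
+// trick). Illustrative only; the SSE2 version above also handles do_two, and
+// the helper names here are not part of the original file.
+#if 0
+#define MUL(x, K) (((x) * (K)) >> 16)
+static WEBP_INLINE uint8_t Clip8(int v) {
+  return (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t)v;
+}
+static void ITransformOneRef(const uint8_t* ref, const int16_t* in,
+                             uint8_t* dst) {
+  const int K1 = 85627, K2 = 35468;
+  int tmp[16], i;
+  for (i = 0; i < 4; ++i) {              // vertical pass, column i
+    const int a = in[i + 0] + in[i + 8];
+    const int b = in[i + 0] - in[i + 8];
+    const int c = MUL(in[i + 4], K2) - MUL(in[i + 12], K1);
+    const int d = MUL(in[i + 4], K1) + MUL(in[i + 12], K2);
+    tmp[i + 0] = a + d;
+    tmp[i + 4] = b + c;
+    tmp[i + 8] = b - c;
+    tmp[i + 12] = a - d;
+  }
+  for (i = 0; i < 4; ++i, ref += BPS, dst += BPS) {  // horizontal pass, rows
+    const int dc = tmp[i * 4 + 0] + 4;   // +4 rounds the final >> 3
+    const int a = dc + tmp[i * 4 + 2];
+    const int b = dc - tmp[i * 4 + 2];
+    const int c = MUL(tmp[i * 4 + 1], K2) - MUL(tmp[i * 4 + 3], K1);
+    const int d = MUL(tmp[i * 4 + 1], K1) + MUL(tmp[i * 4 + 3], K2);
+    dst[0] = Clip8(ref[0] + ((a + d) >> 3));
+    dst[1] = Clip8(ref[1] + ((b + c) >> 3));
+    dst[2] = Clip8(ref[2] + ((b - c) >> 3));
+    dst[3] = Clip8(ref[3] + ((a - d) >> 3));
+  }
+}
+#undef MUL
+#endif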
+
+static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
+ int16_t* out) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i seven = _mm_set1_epi16(7);
+ const __m128i k937 = _mm_set1_epi32(937);
+ const __m128i k1812 = _mm_set1_epi32(1812);
+ const __m128i k51000 = _mm_set1_epi32(51000);
+ const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
+ const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
+ 5352, 2217, 5352, 2217);
+ const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
+ 2217, -5352, 2217, -5352);
+ const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8);
+ const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8);
+ const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352,
+ 2217, 5352, 2217, 5352);
+ const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217,
+ -5352, 2217, -5352, 2217);
+ __m128i v01, v32;
+
+ // Difference between src and ref and initial transpose.
+ {
+ // Load src and convert to 16b.
+ const __m128i src0 = _mm_loadl_epi64((__m128i*)&src[0 * BPS]);
+ const __m128i src1 = _mm_loadl_epi64((__m128i*)&src[1 * BPS]);
+ const __m128i src2 = _mm_loadl_epi64((__m128i*)&src[2 * BPS]);
+ const __m128i src3 = _mm_loadl_epi64((__m128i*)&src[3 * BPS]);
+ const __m128i src_0 = _mm_unpacklo_epi8(src0, zero);
+ const __m128i src_1 = _mm_unpacklo_epi8(src1, zero);
+ const __m128i src_2 = _mm_unpacklo_epi8(src2, zero);
+ const __m128i src_3 = _mm_unpacklo_epi8(src3, zero);
+ // Load ref and convert to 16b.
+ const __m128i ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]);
+ const __m128i ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]);
+ const __m128i ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]);
+ const __m128i ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]);
+ const __m128i ref_0 = _mm_unpacklo_epi8(ref0, zero);
+ const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
+ const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
+ const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
+ // Compute difference. -> 00 01 02 03 00 00 00 00
+ const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
+ const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
+ const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
+ const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);
+
+ // Unpack and shuffle
+ // 00 01 02 03 0 0 0 0
+ // 10 11 12 13 0 0 0 0
+ // 20 21 22 23 0 0 0 0
+ // 30 31 32 33 0 0 0 0
+ const __m128i shuf01 = _mm_unpacklo_epi32(diff0, diff1);
+ const __m128i shuf23 = _mm_unpacklo_epi32(diff2, diff3);
+ // 00 01 10 11 02 03 12 13
+ // 20 21 30 31 22 23 32 33
+ const __m128i shuf01_p =
+ _mm_shufflehi_epi16(shuf01, _MM_SHUFFLE(2, 3, 0, 1));
+ const __m128i shuf23_p =
+ _mm_shufflehi_epi16(shuf23, _MM_SHUFFLE(2, 3, 0, 1));
+ // 00 01 10 11 03 02 13 12
+ // 20 21 30 31 23 22 33 32
+ const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p);
+ const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p);
+ // 00 01 10 11 20 21 30 31
+ // 03 02 13 12 23 22 33 32
+ const __m128i a01 = _mm_add_epi16(s01, s32);
+ const __m128i a32 = _mm_sub_epi16(s01, s32);
+ // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ]
+ // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ]
+
+ const __m128i tmp0 = _mm_madd_epi16(a01, k88p); // [ (a0 + a1) << 3, ... ]
+ const __m128i tmp2 = _mm_madd_epi16(a01, k88m); // [ (a0 - a1) << 3, ... ]
+ const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p);
+ const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m);
+ const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812);
+ const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937);
+ const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9);
+ const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9);
+ const __m128i s03 = _mm_packs_epi32(tmp0, tmp2);
+ const __m128i s12 = _mm_packs_epi32(tmp1, tmp3);
+ const __m128i s_lo = _mm_unpacklo_epi16(s03, s12); // 0 1 0 1 0 1...
+ const __m128i s_hi = _mm_unpackhi_epi16(s03, s12); // 2 3 2 3 2 3
+ const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi);
+ v01 = _mm_unpacklo_epi32(s_lo, s_hi);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); // 3 2 3 2 3 2..
+ }
+
+ // Second pass
+ {
+ // Same operations are done on the (0,3) and (1,2) pairs.
+ // a0 = v0 + v3
+ // a1 = v1 + v2
+ // a3 = v0 - v3
+ // a2 = v1 - v2
+ const __m128i a01 = _mm_add_epi16(v01, v32);
+ const __m128i a32 = _mm_sub_epi16(v01, v32);
+ const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
+ const __m128i a22 = _mm_unpackhi_epi64(a32, a32);
+ const __m128i a01_plus_7 = _mm_add_epi16(a01, seven);
+
+ // d0 = (a0 + a1 + 7) >> 4;
+ // d2 = (a0 - a1 + 7) >> 4;
+ const __m128i c0 = _mm_add_epi16(a01_plus_7, a11);
+ const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11);
+ const __m128i d0 = _mm_srai_epi16(c0, 4);
+ const __m128i d2 = _mm_srai_epi16(c2, 4);
+
+ // f1 = ((b3 * 5352 + b2 * 2217 + 12000) >> 16)
+ // f3 = ((b3 * 2217 - b2 * 5352 + 51000) >> 16)
+ const __m128i b23 = _mm_unpacklo_epi16(a22, a32);
+ const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
+ const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
+ const __m128i d1 = _mm_add_epi32(c1, k12000_plus_one);
+ const __m128i d3 = _mm_add_epi32(c3, k51000);
+ const __m128i e1 = _mm_srai_epi32(d1, 16);
+ const __m128i e3 = _mm_srai_epi32(d3, 16);
+ const __m128i f1 = _mm_packs_epi32(e1, e1);
+ const __m128i f3 = _mm_packs_epi32(e3, e3);
+ // f1 = f1 + (a3 != 0);
+ // The compare will return (0xffff, 0) for (==0, !=0). To turn that into the
+ // desired (0, 1), we add one earlier through k12000_plus_one.
+ // -> f1 = f1 + 1 - (a3 == 0)
+ const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));
+
+ _mm_storel_epi64((__m128i*)&out[ 0], d0);
+ _mm_storel_epi64((__m128i*)&out[ 4], g1);
+ _mm_storel_epi64((__m128i*)&out[ 8], d2);
+ _mm_storel_epi64((__m128i*)&out[12], f3);
+ }
+}
+
+static void FTransformWHTSSE2(const int16_t* in, int16_t* out) {
+ int32_t tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i, in += 64) {
+ const int a0 = (in[0 * 16] + in[2 * 16]);
+ const int a1 = (in[1 * 16] + in[3 * 16]);
+ const int a2 = (in[1 * 16] - in[3 * 16]);
+ const int a3 = (in[0 * 16] - in[2 * 16]);
+ tmp[0 + i * 4] = a0 + a1;
+ tmp[1 + i * 4] = a3 + a2;
+ tmp[2 + i * 4] = a3 - a2;
+ tmp[3 + i * 4] = a0 - a1;
+ }
+ {
+ const __m128i src0 = _mm_loadu_si128((__m128i*)&tmp[0]);
+ const __m128i src1 = _mm_loadu_si128((__m128i*)&tmp[4]);
+ const __m128i src2 = _mm_loadu_si128((__m128i*)&tmp[8]);
+ const __m128i src3 = _mm_loadu_si128((__m128i*)&tmp[12]);
+ const __m128i a0 = _mm_add_epi32(src0, src2);
+ const __m128i a1 = _mm_add_epi32(src1, src3);
+ const __m128i a2 = _mm_sub_epi32(src1, src3);
+ const __m128i a3 = _mm_sub_epi32(src0, src2);
+ const __m128i b0 = _mm_srai_epi32(_mm_add_epi32(a0, a1), 1);
+ const __m128i b1 = _mm_srai_epi32(_mm_add_epi32(a3, a2), 1);
+ const __m128i b2 = _mm_srai_epi32(_mm_sub_epi32(a3, a2), 1);
+ const __m128i b3 = _mm_srai_epi32(_mm_sub_epi32(a0, a1), 1);
+ const __m128i out0 = _mm_packs_epi32(b0, b1);
+ const __m128i out1 = _mm_packs_epi32(b2, b3);
+ _mm_storeu_si128((__m128i*)&out[0], out0);
+ _mm_storeu_si128((__m128i*)&out[8], out1);
+ }
+}
+
+//------------------------------------------------------------------------------
+// Metric
+
+static int SSE_Nx4SSE2(const uint8_t* a, const uint8_t* b,
+ int num_quads, int do_16) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i sum1 = zero;
+ __m128i sum2 = zero;
+
+ while (num_quads-- > 0) {
+ // Note: for the !do_16 case, we read 16 pixels instead of 8 but that's ok,
+ // thanks to buffer over-allocation to that effect.
+ const __m128i a0 = _mm_loadu_si128((__m128i*)&a[BPS * 0]);
+ const __m128i a1 = _mm_loadu_si128((__m128i*)&a[BPS * 1]);
+ const __m128i a2 = _mm_loadu_si128((__m128i*)&a[BPS * 2]);
+ const __m128i a3 = _mm_loadu_si128((__m128i*)&a[BPS * 3]);
+ const __m128i b0 = _mm_loadu_si128((__m128i*)&b[BPS * 0]);
+ const __m128i b1 = _mm_loadu_si128((__m128i*)&b[BPS * 1]);
+ const __m128i b2 = _mm_loadu_si128((__m128i*)&b[BPS * 2]);
+ const __m128i b3 = _mm_loadu_si128((__m128i*)&b[BPS * 3]);
+
+ // compute clip0(a-b) and clip0(b-a)
+ const __m128i a0p = _mm_subs_epu8(a0, b0);
+ const __m128i a0m = _mm_subs_epu8(b0, a0);
+ const __m128i a1p = _mm_subs_epu8(a1, b1);
+ const __m128i a1m = _mm_subs_epu8(b1, a1);
+ const __m128i a2p = _mm_subs_epu8(a2, b2);
+ const __m128i a2m = _mm_subs_epu8(b2, a2);
+ const __m128i a3p = _mm_subs_epu8(a3, b3);
+ const __m128i a3m = _mm_subs_epu8(b3, a3);
+
+ // compute |a-b| with 8b arithmetic as clip0(a-b) | clip0(b-a)
+ const __m128i diff0 = _mm_or_si128(a0p, a0m);
+ const __m128i diff1 = _mm_or_si128(a1p, a1m);
+ const __m128i diff2 = _mm_or_si128(a2p, a2m);
+ const __m128i diff3 = _mm_or_si128(a3p, a3m);
+
+ // unpack (only four operations, instead of eight)
+ const __m128i low0 = _mm_unpacklo_epi8(diff0, zero);
+ const __m128i low1 = _mm_unpacklo_epi8(diff1, zero);
+ const __m128i low2 = _mm_unpacklo_epi8(diff2, zero);
+ const __m128i low3 = _mm_unpacklo_epi8(diff3, zero);
+
+ // multiply with self
+ const __m128i low_madd0 = _mm_madd_epi16(low0, low0);
+ const __m128i low_madd1 = _mm_madd_epi16(low1, low1);
+ const __m128i low_madd2 = _mm_madd_epi16(low2, low2);
+ const __m128i low_madd3 = _mm_madd_epi16(low3, low3);
+
+ // collect in a cascading way
+ const __m128i low_sum0 = _mm_add_epi32(low_madd0, low_madd1);
+ const __m128i low_sum1 = _mm_add_epi32(low_madd2, low_madd3);
+ sum1 = _mm_add_epi32(sum1, low_sum0);
+ sum2 = _mm_add_epi32(sum2, low_sum1);
+
+ if (do_16) { // if necessary, process the higher 8 bytes similarly
+ const __m128i hi0 = _mm_unpackhi_epi8(diff0, zero);
+ const __m128i hi1 = _mm_unpackhi_epi8(diff1, zero);
+ const __m128i hi2 = _mm_unpackhi_epi8(diff2, zero);
+ const __m128i hi3 = _mm_unpackhi_epi8(diff3, zero);
+
+ const __m128i hi_madd0 = _mm_madd_epi16(hi0, hi0);
+ const __m128i hi_madd1 = _mm_madd_epi16(hi1, hi1);
+ const __m128i hi_madd2 = _mm_madd_epi16(hi2, hi2);
+ const __m128i hi_madd3 = _mm_madd_epi16(hi3, hi3);
+ const __m128i hi_sum0 = _mm_add_epi32(hi_madd0, hi_madd1);
+ const __m128i hi_sum1 = _mm_add_epi32(hi_madd2, hi_madd3);
+ sum1 = _mm_add_epi32(sum1, hi_sum0);
+ sum2 = _mm_add_epi32(sum2, hi_sum1);
+ }
+ a += 4 * BPS;
+ b += 4 * BPS;
+ }
+ {
+ int32_t tmp[4];
+ const __m128i sum = _mm_add_epi32(sum1, sum2);
+ _mm_storeu_si128((__m128i*)tmp, sum);
+ return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
+ }
+}
+
+static int SSE16x16SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 4, 1);
+}
+
+static int SSE16x8SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 2, 1);
+}
+
+static int SSE8x8SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 2, 0);
+}
+
+static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
+ const __m128i zero = _mm_setzero_si128();
+
+ // Load values. Note that we read 8 pixels instead of 4,
+ // but the a/b buffers are over-allocated to that effect.
+ const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
+ const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
+ const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
+ const __m128i a3 = _mm_loadl_epi64((__m128i*)&a[BPS * 3]);
+ const __m128i b0 = _mm_loadl_epi64((__m128i*)&b[BPS * 0]);
+ const __m128i b1 = _mm_loadl_epi64((__m128i*)&b[BPS * 1]);
+ const __m128i b2 = _mm_loadl_epi64((__m128i*)&b[BPS * 2]);
+ const __m128i b3 = _mm_loadl_epi64((__m128i*)&b[BPS * 3]);
+
+ // Combine pair of lines and convert to 16b.
+ const __m128i a01 = _mm_unpacklo_epi32(a0, a1);
+ const __m128i a23 = _mm_unpacklo_epi32(a2, a3);
+ const __m128i b01 = _mm_unpacklo_epi32(b0, b1);
+ const __m128i b23 = _mm_unpacklo_epi32(b2, b3);
+ const __m128i a01s = _mm_unpacklo_epi8(a01, zero);
+ const __m128i a23s = _mm_unpacklo_epi8(a23, zero);
+ const __m128i b01s = _mm_unpacklo_epi8(b01, zero);
+ const __m128i b23s = _mm_unpacklo_epi8(b23, zero);
+
+ // Compute differences; (a-b)^2 = (abs(a-b))^2 = (sat8(a-b) + sat8(b-a))^2
+ // TODO(cduvivier): Disassemble and figure out why this is fastest. We don't
+ // need absolute values, there is no need to do calculation
+ // in 8bit as we are already in 16bit, ... Yet this is what
+ // benchmarks the fastest!
+ const __m128i d0 = _mm_subs_epu8(a01s, b01s);
+ const __m128i d1 = _mm_subs_epu8(b01s, a01s);
+ const __m128i d2 = _mm_subs_epu8(a23s, b23s);
+ const __m128i d3 = _mm_subs_epu8(b23s, a23s);
+
+ // Square and add them all together.
+ const __m128i madd0 = _mm_madd_epi16(d0, d0);
+ const __m128i madd1 = _mm_madd_epi16(d1, d1);
+ const __m128i madd2 = _mm_madd_epi16(d2, d2);
+ const __m128i madd3 = _mm_madd_epi16(d3, d3);
+ const __m128i sum0 = _mm_add_epi32(madd0, madd1);
+ const __m128i sum1 = _mm_add_epi32(madd2, madd3);
+ const __m128i sum2 = _mm_add_epi32(sum0, sum1);
+
+ int32_t tmp[4];
+ _mm_storeu_si128((__m128i*)tmp, sum2);
+ return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
+}
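+
+// The SSE*SSE2 helpers above all compute a plain sum of squared differences
+// over a block; a scalar sketch (names illustrative, not part of the file):
+#if 0
+static int SSE_NxM_Ref(const uint8_t* a, const uint8_t* b, int w, int h) {
+  int sum = 0, x, y;
+  for (y = 0; y < h; ++y, a += BPS, b += BPS) {
+    for (x = 0; x < w; ++x) {
+      const int diff = a[x] - b[x];
+      sum += diff * diff;
+    }
+  }
+  return sum;   // e.g. SSE16x16SSE2(a, b) == SSE_NxM_Ref(a, b, 16, 16)
+}
+#endif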
+
+//------------------------------------------------------------------------------
+// Texture distortion
+//
+// We try to match the spectral content (weighted) between source and
+// reconstructed samples.
+
+// Hadamard transform
+// Returns the difference between the weighted sums of the absolute values of
+// the transformed coefficients of inA and inB.
+static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
+ const uint16_t* const w) {
+ int32_t sum[4];
+ __m128i tmp_0, tmp_1, tmp_2, tmp_3;
+ const __m128i zero = _mm_setzero_si128();
+
+ // Load, combine and transpose inputs.
+ {
+ const __m128i inA_0 = _mm_loadl_epi64((__m128i*)&inA[BPS * 0]);
+ const __m128i inA_1 = _mm_loadl_epi64((__m128i*)&inA[BPS * 1]);
+ const __m128i inA_2 = _mm_loadl_epi64((__m128i*)&inA[BPS * 2]);
+ const __m128i inA_3 = _mm_loadl_epi64((__m128i*)&inA[BPS * 3]);
+ const __m128i inB_0 = _mm_loadl_epi64((__m128i*)&inB[BPS * 0]);
+ const __m128i inB_1 = _mm_loadl_epi64((__m128i*)&inB[BPS * 1]);
+ const __m128i inB_2 = _mm_loadl_epi64((__m128i*)&inB[BPS * 2]);
+ const __m128i inB_3 = _mm_loadl_epi64((__m128i*)&inB[BPS * 3]);
+
+ // Combine inA and inB (we'll do two transforms in parallel).
+ const __m128i inAB_0 = _mm_unpacklo_epi8(inA_0, inB_0);
+ const __m128i inAB_1 = _mm_unpacklo_epi8(inA_1, inB_1);
+ const __m128i inAB_2 = _mm_unpacklo_epi8(inA_2, inB_2);
+ const __m128i inAB_3 = _mm_unpacklo_epi8(inA_3, inB_3);
+ // a00 b00 a01 b01 a02 b02 a03 b03 0 0 0 0 0 0 0 0
+ // a10 b10 a11 b11 a12 b12 a13 b13 0 0 0 0 0 0 0 0
+ // a20 b20 a21 b21 a22 b22 a23 b23 0 0 0 0 0 0 0 0
+ // a30 b30 a31 b31 a32 b32 a33 b33 0 0 0 0 0 0 0 0
+
+ // Transpose the two 4x4, discarding the filling zeroes.
+ const __m128i transpose0_0 = _mm_unpacklo_epi8(inAB_0, inAB_2);
+ const __m128i transpose0_1 = _mm_unpacklo_epi8(inAB_1, inAB_3);
+ // a00 a20 b00 b20 a01 a21 b01 b21 a02 a22 b02 b22 a03 a23 b03 b23
+ // a10 a30 b10 b30 a11 a31 b11 b31 a12 a32 b12 b32 a13 a33 b13 b33
+ const __m128i transpose1_0 = _mm_unpacklo_epi8(transpose0_0, transpose0_1);
+ const __m128i transpose1_1 = _mm_unpackhi_epi8(transpose0_0, transpose0_1);
+ // a00 a10 a20 a30 b00 b10 b20 b30 a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32 a03 a13 a23 a33 b03 b13 b23 b33
+
+ // Convert to 16b.
+ tmp_0 = _mm_unpacklo_epi8(transpose1_0, zero);
+ tmp_1 = _mm_unpackhi_epi8(transpose1_0, zero);
+ tmp_2 = _mm_unpacklo_epi8(transpose1_1, zero);
+ tmp_3 = _mm_unpackhi_epi8(transpose1_1, zero);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+
+ // Horizontal pass and subsequent transpose.
+ {
+ // Calculate a and b (two 4x4 at once).
+ const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
+ const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
+ const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
+ const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
+ const __m128i b0 = _mm_add_epi16(a0, a1);
+ const __m128i b1 = _mm_add_epi16(a3, a2);
+ const __m128i b2 = _mm_sub_epi16(a3, a2);
+ const __m128i b3 = _mm_sub_epi16(a0, a1);
+ // a00 a01 a02 a03 b00 b01 b02 b03
+ // a10 a11 a12 a13 b10 b11 b12 b13
+ // a20 a21 a22 a23 b20 b21 b22 b23
+ // a30 a31 a32 a33 b30 b31 b32 b33
+
+ // Transpose the two 4x4.
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(b0, b1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(b2, b3);
+ const __m128i transpose0_2 = _mm_unpackhi_epi16(b0, b1);
+ const __m128i transpose0_3 = _mm_unpackhi_epi16(b2, b3);
+ // a00 a10 a01 a11 a02 a12 a03 a13
+ // a20 a30 a21 a31 a22 a32 a23 a33
+ // b00 b10 b01 b11 b02 b12 b03 b13
+ // b20 b30 b21 b31 b22 b32 b23 b33
+ const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
+ const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // b00 b10 b20 b30 b01 b11 b21 b31
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // b02 b12 b22 b32 b03 b13 b23 b33
+ tmp_0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
+ tmp_1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
+ tmp_2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
+ tmp_3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
+ // a00 a10 a20 a30 b00 b10 b20 b30
+ // a01 a11 a21 a31 b01 b11 b21 b31
+ // a02 a12 a22 a32 b02 b12 b22 b32
+ // a03 a13 a23 a33 b03 b13 b23 b33
+ }
+
+ // Vertical pass and difference of weighted sums.
+ {
+ // Load all inputs.
+ // TODO(cduvivier): Make variable declarations and allocations aligned so
+ // we can use _mm_load_si128 instead of _mm_loadu_si128.
+ const __m128i w_0 = _mm_loadu_si128((__m128i*)&w[0]);
+ const __m128i w_8 = _mm_loadu_si128((__m128i*)&w[8]);
+
+ // Calculate a and b (two 4x4 at once).
+ const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
+ const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
+ const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
+ const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
+ const __m128i b0 = _mm_add_epi16(a0, a1);
+ const __m128i b1 = _mm_add_epi16(a3, a2);
+ const __m128i b2 = _mm_sub_epi16(a3, a2);
+ const __m128i b3 = _mm_sub_epi16(a0, a1);
+
+ // Separate the transforms of inA and inB.
+ __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
+ __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
+ __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
+ __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);
+
+ {
+ // sign(b) = b >> 15 (0x0000 if positive, 0xffff if negative)
+ const __m128i sign_A_b0 = _mm_srai_epi16(A_b0, 15);
+ const __m128i sign_A_b2 = _mm_srai_epi16(A_b2, 15);
+ const __m128i sign_B_b0 = _mm_srai_epi16(B_b0, 15);
+ const __m128i sign_B_b2 = _mm_srai_epi16(B_b2, 15);
+
+ // b = abs(b) = (b ^ sign) - sign
+ A_b0 = _mm_xor_si128(A_b0, sign_A_b0);
+ A_b2 = _mm_xor_si128(A_b2, sign_A_b2);
+ B_b0 = _mm_xor_si128(B_b0, sign_B_b0);
+ B_b2 = _mm_xor_si128(B_b2, sign_B_b2);
+ A_b0 = _mm_sub_epi16(A_b0, sign_A_b0);
+ A_b2 = _mm_sub_epi16(A_b2, sign_A_b2);
+ B_b0 = _mm_sub_epi16(B_b0, sign_B_b0);
+ B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
+ }
+
+ // weighted sums
+ A_b0 = _mm_madd_epi16(A_b0, w_0);
+ A_b2 = _mm_madd_epi16(A_b2, w_8);
+ B_b0 = _mm_madd_epi16(B_b0, w_0);
+ B_b2 = _mm_madd_epi16(B_b2, w_8);
+ A_b0 = _mm_add_epi32(A_b0, A_b2);
+ B_b0 = _mm_add_epi32(B_b0, B_b2);
+
+ // difference of weighted sums
+ A_b0 = _mm_sub_epi32(A_b0, B_b0);
+ _mm_storeu_si128((__m128i*)&sum[0], A_b0);
+ }
+ return sum[0] + sum[1] + sum[2] + sum[3];
+}
+
+static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ const int diff_sum = TTransformSSE2(a, b, w);
+ return abs(diff_sum) >> 5;
+}
+
+static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int D = 0;
+ int x, y;
+ for (y = 0; y < 16 * BPS; y += 4 * BPS) {
+ for (x = 0; x < 16; x += 4) {
+ D += Disto4x4SSE2(a + x + y, b + x + y, w);
+ }
+ }
+ return D;
+}
+
+//------------------------------------------------------------------------------
+// Quantization
+//
+
+// Simple quantization
+static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
+ int n, const VP8Matrix* const mtx) {
+ const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
+ const __m128i zero = _mm_setzero_si128();
+ __m128i coeff0, coeff8;
+ __m128i out0, out8;
+ __m128i packed_out;
+
+ // Load all inputs.
+ // TODO(cduvivier): Make variable declarations and allocations aligned so that
+ // we can use _mm_load_si128 instead of _mm_loadu_si128.
+ __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
+ __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
+ const __m128i sharpen0 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[0]);
+ const __m128i sharpen8 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[8]);
+ const __m128i iq0 = _mm_loadu_si128((__m128i*)&mtx->iq_[0]);
+ const __m128i iq8 = _mm_loadu_si128((__m128i*)&mtx->iq_[8]);
+ const __m128i bias0 = _mm_loadu_si128((__m128i*)&mtx->bias_[0]);
+ const __m128i bias8 = _mm_loadu_si128((__m128i*)&mtx->bias_[8]);
+ const __m128i q0 = _mm_loadu_si128((__m128i*)&mtx->q_[0]);
+ const __m128i q8 = _mm_loadu_si128((__m128i*)&mtx->q_[8]);
+
+ // sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative)
+ const __m128i sign0 = _mm_srai_epi16(in0, 15);
+ const __m128i sign8 = _mm_srai_epi16(in8, 15);
+
+ // coeff = abs(in) = (in ^ sign) - sign
+ coeff0 = _mm_xor_si128(in0, sign0);
+ coeff8 = _mm_xor_si128(in8, sign8);
+ coeff0 = _mm_sub_epi16(coeff0, sign0);
+ coeff8 = _mm_sub_epi16(coeff8, sign8);
+
+ // coeff = abs(in) + sharpen
+ coeff0 = _mm_add_epi16(coeff0, sharpen0);
+ coeff8 = _mm_add_epi16(coeff8, sharpen8);
+
+ // out = (coeff * iQ + B) >> QFIX;
+ {
+ // doing calculations with 32b precision (QFIX=17)
+ // out = (coeff * iQ)
+ __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
+ __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
+ __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
+ __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
+ __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
+ __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
+ __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
+ __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
+ // expand bias from 16b to 32b
+ __m128i bias_00 = _mm_unpacklo_epi16(bias0, zero);
+ __m128i bias_04 = _mm_unpackhi_epi16(bias0, zero);
+ __m128i bias_08 = _mm_unpacklo_epi16(bias8, zero);
+ __m128i bias_12 = _mm_unpackhi_epi16(bias8, zero);
+ // out = (coeff * iQ + B)
+ out_00 = _mm_add_epi32(out_00, bias_00);
+ out_04 = _mm_add_epi32(out_04, bias_04);
+ out_08 = _mm_add_epi32(out_08, bias_08);
+ out_12 = _mm_add_epi32(out_12, bias_12);
+ // out = (coeff * iQ + B) >> QFIX;
+ out_00 = _mm_srai_epi32(out_00, QFIX);
+ out_04 = _mm_srai_epi32(out_04, QFIX);
+ out_08 = _mm_srai_epi32(out_08, QFIX);
+ out_12 = _mm_srai_epi32(out_12, QFIX);
+
+ // pack result as 16b
+ out0 = _mm_packs_epi32(out_00, out_04);
+ out8 = _mm_packs_epi32(out_08, out_12);
+
+ // if (coeff > 2047) coeff = 2047
+ out0 = _mm_min_epi16(out0, max_coeff_2047);
+ out8 = _mm_min_epi16(out8, max_coeff_2047);
+ }
+
+ // get sign back (if (sign[j]) out_n = -out_n)
+ out0 = _mm_xor_si128(out0, sign0);
+ out8 = _mm_xor_si128(out8, sign8);
+ out0 = _mm_sub_epi16(out0, sign0);
+ out8 = _mm_sub_epi16(out8, sign8);
+
+ // in = out * Q
+ in0 = _mm_mullo_epi16(out0, q0);
+ in8 = _mm_mullo_epi16(out8, q8);
+
+ _mm_storeu_si128((__m128i*)&in[0], in0);
+ _mm_storeu_si128((__m128i*)&in[8], in8);
+
+ // zigzag the output before storing it.
+ //
+ // The zigzag pattern can almost be reproduced with a small sequence of
+ // shuffles. After it, we only need to swap the 7th (ending up in third
+ // position instead of twelfth) and 8th values.
+ {
+ __m128i outZ0, outZ8;
+ outZ0 = _mm_shufflehi_epi16(out0, _MM_SHUFFLE(2, 1, 3, 0));
+ outZ0 = _mm_shuffle_epi32 (outZ0, _MM_SHUFFLE(3, 1, 2, 0));
+ outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2));
+ outZ8 = _mm_shufflelo_epi16(out8, _MM_SHUFFLE(3, 0, 2, 1));
+ outZ8 = _mm_shuffle_epi32 (outZ8, _MM_SHUFFLE(3, 1, 2, 0));
+ outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0));
+ _mm_storeu_si128((__m128i*)&out[0], outZ0);
+ _mm_storeu_si128((__m128i*)&out[8], outZ8);
+ packed_out = _mm_packs_epi16(outZ0, outZ8);
+ }
+ {
+ const int16_t outZ_12 = out[12];
+ const int16_t outZ_3 = out[3];
+ out[3] = outZ_12;
+ out[12] = outZ_3;
+ }
+
+ // detect if all 'out' values are zeroes or not
+ {
+ int32_t tmp[4];
+ _mm_storeu_si128((__m128i*)tmp, packed_out);
+ if (n) {
+ tmp[0] &= ~0xff;
+ }
+ return (tmp[3] || tmp[2] || tmp[1] || tmp[0]);
+ }
+}
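+
+// Scalar sketch of the quantization above. The scan order below is VP8's
+// standard zigzag, reproduced here for illustration, and the rounding
+// follows the (coeff * iQ + B) >> QFIX line above; illustrative only.
+#if 0
+static const uint8_t kZigzag[16] = {
+  0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
+};
+static int QuantizeBlockRef(int16_t in[16], int16_t out[16],
+                            int n, const VP8Matrix* const mtx) {
+  int last = -1;
+  for (; n < 16; ++n) {
+    const int j = kZigzag[n];
+    const int sign = (in[j] < 0);
+    const int coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
+    int level = (coeff * mtx->iq_[j] + mtx->bias_[j]) >> QFIX;
+    if (level > MAX_LEVEL) level = MAX_LEVEL;
+    if (sign) level = -level;
+    in[j] = level * mtx->q_[j];
+    out[n] = level;
+    if (level) last = n;
+  }
+  return (last >= 0);
+}
+#endif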
+
+static int QuantizeBlockWHTSSE2(int16_t in[16], int16_t out[16],
+ const VP8Matrix* const mtx) {
+ return QuantizeBlockSSE2(in, out, 0, mtx);
+}
+
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitSSE2(void);
+
+void VP8EncDspInitSSE2(void) {
+#if defined(WEBP_USE_SSE2)
+ VP8CollectHistogram = CollectHistogramSSE2;
+ VP8EncQuantizeBlock = QuantizeBlockSSE2;
+ VP8EncQuantizeBlockWHT = QuantizeBlockWHTSSE2;
+ VP8ITransform = ITransformSSE2;
+ VP8FTransform = FTransformSSE2;
+ VP8FTransformWHT = FTransformWHTSSE2;
+ VP8SSE16x16 = SSE16x16SSE2;
+ VP8SSE16x8 = SSE16x8SSE2;
+ VP8SSE8x8 = SSE8x8SSE2;
+ VP8SSE4x4 = SSE4x4SSE2;
+ VP8TDisto4x4 = Disto4x4SSE2;
+ VP8TDisto16x16 = Disto16x16SSE2;
+#endif // WEBP_USE_SSE2
+}
+
diff --git a/drivers/webp/dsp/lossless.c b/drivers/webp/dsp/lossless.c
new file mode 100644
index 000000000..bab76d22d
--- /dev/null
+++ b/drivers/webp/dsp/lossless.c
@@ -0,0 +1,1532 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Image transforms and color space conversion methods for lossless decoder.
+//
+// Authors: Vikas Arora (vikaas.arora@gmail.com)
+// Jyrki Alakuijala (jyrki@google.com)
+// Urvang Joshi (urvang@google.com)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+#include <emmintrin.h>
+#endif
+
+#include <math.h>
+#include <stdlib.h>
+#include "./lossless.h"
+#include "../dec/vp8li.h"
+#include "./yuv.h"
+
+#define MAX_DIFF_COST (1e30f)
+
+// lookup tables for small integer values of log2(v) and v * log2(v)
+#define APPROX_LOG_MAX 4096
+#define LOG_2_RECIPROCAL 1.44269504088896338700465094007086
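+// (LOG_2_RECIPROCAL is 1/ln(2): multiplying a natural log by it converts it
+// to log2, for values past the end of the lookup tables below.)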
+const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
+ 0.0000000000000000f, 0.0000000000000000f,
+ 1.0000000000000000f, 1.5849625007211560f,
+ 2.0000000000000000f, 2.3219280948873621f,
+ 2.5849625007211560f, 2.8073549220576041f,
+ 3.0000000000000000f, 3.1699250014423121f,
+ 3.3219280948873621f, 3.4594316186372973f,
+ 3.5849625007211560f, 3.7004397181410921f,
+ 3.8073549220576041f, 3.9068905956085187f,
+ 4.0000000000000000f, 4.0874628412503390f,
+ 4.1699250014423121f, 4.2479275134435852f,
+ 4.3219280948873626f, 4.3923174227787606f,
+ 4.4594316186372973f, 4.5235619560570130f,
+ 4.5849625007211560f, 4.6438561897747243f,
+ 4.7004397181410917f, 4.7548875021634682f,
+ 4.8073549220576037f, 4.8579809951275718f,
+ 4.9068905956085187f, 4.9541963103868749f,
+ 5.0000000000000000f, 5.0443941193584533f,
+ 5.0874628412503390f, 5.1292830169449663f,
+ 5.1699250014423121f, 5.2094533656289501f,
+ 5.2479275134435852f, 5.2854022188622487f,
+ 5.3219280948873626f, 5.3575520046180837f,
+ 5.3923174227787606f, 5.4262647547020979f,
+ 5.4594316186372973f, 5.4918530963296747f,
+ 5.5235619560570130f, 5.5545888516776376f,
+ 5.5849625007211560f, 5.6147098441152083f,
+ 5.6438561897747243f, 5.6724253419714951f,
+ 5.7004397181410917f, 5.7279204545631987f,
+ 5.7548875021634682f, 5.7813597135246599f,
+ 5.8073549220576037f, 5.8328900141647412f,
+ 5.8579809951275718f, 5.8826430493618415f,
+ 5.9068905956085187f, 5.9307373375628866f,
+ 5.9541963103868749f, 5.9772799234999167f,
+ 6.0000000000000000f, 6.0223678130284543f,
+ 6.0443941193584533f, 6.0660891904577720f,
+ 6.0874628412503390f, 6.1085244567781691f,
+ 6.1292830169449663f, 6.1497471195046822f,
+ 6.1699250014423121f, 6.1898245588800175f,
+ 6.2094533656289501f, 6.2288186904958804f,
+ 6.2479275134435852f, 6.2667865406949010f,
+ 6.2854022188622487f, 6.3037807481771030f,
+ 6.3219280948873626f, 6.3398500028846243f,
+ 6.3575520046180837f, 6.3750394313469245f,
+ 6.3923174227787606f, 6.4093909361377017f,
+ 6.4262647547020979f, 6.4429434958487279f,
+ 6.4594316186372973f, 6.4757334309663976f,
+ 6.4918530963296747f, 6.5077946401986963f,
+ 6.5235619560570130f, 6.5391588111080309f,
+ 6.5545888516776376f, 6.5698556083309478f,
+ 6.5849625007211560f, 6.5999128421871278f,
+ 6.6147098441152083f, 6.6293566200796094f,
+ 6.6438561897747243f, 6.6582114827517946f,
+ 6.6724253419714951f, 6.6865005271832185f,
+ 6.7004397181410917f, 6.7142455176661224f,
+ 6.7279204545631987f, 6.7414669864011464f,
+ 6.7548875021634682f, 6.7681843247769259f,
+ 6.7813597135246599f, 6.7944158663501061f,
+ 6.8073549220576037f, 6.8201789624151878f,
+ 6.8328900141647412f, 6.8454900509443747f,
+ 6.8579809951275718f, 6.8703647195834047f,
+ 6.8826430493618415f, 6.8948177633079437f,
+ 6.9068905956085187f, 6.9188632372745946f,
+ 6.9307373375628866f, 6.9425145053392398f,
+ 6.9541963103868749f, 6.9657842846620869f,
+ 6.9772799234999167f, 6.9886846867721654f,
+ 7.0000000000000000f, 7.0112272554232539f,
+ 7.0223678130284543f, 7.0334230015374501f,
+ 7.0443941193584533f, 7.0552824355011898f,
+ 7.0660891904577720f, 7.0768155970508308f,
+ 7.0874628412503390f, 7.0980320829605263f,
+ 7.1085244567781691f, 7.1189410727235076f,
+ 7.1292830169449663f, 7.1395513523987936f,
+ 7.1497471195046822f, 7.1598713367783890f,
+ 7.1699250014423121f, 7.1799090900149344f,
+ 7.1898245588800175f, 7.1996723448363644f,
+ 7.2094533656289501f, 7.2191685204621611f,
+ 7.2288186904958804f, 7.2384047393250785f,
+ 7.2479275134435852f, 7.2573878426926521f,
+ 7.2667865406949010f, 7.2761244052742375f,
+ 7.2854022188622487f, 7.2946207488916270f,
+ 7.3037807481771030f, 7.3128829552843557f,
+ 7.3219280948873626f, 7.3309168781146167f,
+ 7.3398500028846243f, 7.3487281542310771f,
+ 7.3575520046180837f, 7.3663222142458160f,
+ 7.3750394313469245f, 7.3837042924740519f,
+ 7.3923174227787606f, 7.4008794362821843f,
+ 7.4093909361377017f, 7.4178525148858982f,
+ 7.4262647547020979f, 7.4346282276367245f,
+ 7.4429434958487279f, 7.4512111118323289f,
+ 7.4594316186372973f, 7.4676055500829976f,
+ 7.4757334309663976f, 7.4838157772642563f,
+ 7.4918530963296747f, 7.4998458870832056f,
+ 7.5077946401986963f, 7.5156998382840427f,
+ 7.5235619560570130f, 7.5313814605163118f,
+ 7.5391588111080309f, 7.5468944598876364f,
+ 7.5545888516776376f, 7.5622424242210728f,
+ 7.5698556083309478f, 7.5774288280357486f,
+ 7.5849625007211560f, 7.5924570372680806f,
+ 7.5999128421871278f, 7.6073303137496104f,
+ 7.6147098441152083f, 7.6220518194563764f,
+ 7.6293566200796094f, 7.6366246205436487f,
+ 7.6438561897747243f, 7.6510516911789281f,
+ 7.6582114827517946f, 7.6653359171851764f,
+ 7.6724253419714951f, 7.6794800995054464f,
+ 7.6865005271832185f, 7.6934869574993252f,
+ 7.7004397181410917f, 7.7073591320808825f,
+ 7.7142455176661224f, 7.7210991887071855f,
+ 7.7279204545631987f, 7.7347096202258383f,
+ 7.7414669864011464f, 7.7481928495894605f,
+ 7.7548875021634682f, 7.7615512324444795f,
+ 7.7681843247769259f, 7.7747870596011736f,
+ 7.7813597135246599f, 7.7879025593914317f,
+ 7.7944158663501061f, 7.8008998999203047f,
+ 7.8073549220576037f, 7.8137811912170374f,
+ 7.8201789624151878f, 7.8265484872909150f,
+ 7.8328900141647412f, 7.8392037880969436f,
+ 7.8454900509443747f, 7.8517490414160571f,
+ 7.8579809951275718f, 7.8641861446542797f,
+ 7.8703647195834047f, 7.8765169465649993f,
+ 7.8826430493618415f, 7.8887432488982591f,
+ 7.8948177633079437f, 7.9008668079807486f,
+ 7.9068905956085187f, 7.9128893362299619f,
+ 7.9188632372745946f, 7.9248125036057812f,
+ 7.9307373375628866f, 7.9366379390025709f,
+ 7.9425145053392398f, 7.9483672315846778f,
+ 7.9541963103868749f, 7.9600019320680805f,
+ 7.9657842846620869f, 7.9715435539507719f,
+ 7.9772799234999167f, 7.9829935746943103f,
+ 7.9886846867721654f, 7.9943534368588577f
+};
+
+const float kSLog2Table[LOG_LOOKUP_IDX_MAX] = {
+ 0.00000000f, 0.00000000f, 2.00000000f, 4.75488750f,
+ 8.00000000f, 11.60964047f, 15.50977500f, 19.65148445f,
+ 24.00000000f, 28.52932501f, 33.21928095f, 38.05374781f,
+ 43.01955001f, 48.10571634f, 53.30296891f, 58.60335893f,
+ 64.00000000f, 69.48686830f, 75.05865003f, 80.71062276f,
+ 86.43856190f, 92.23866588f, 98.10749561f, 104.04192499f,
+ 110.03910002f, 116.09640474f, 122.21143267f, 128.38196256f,
+ 134.60593782f, 140.88144886f, 147.20671787f, 153.58008562f,
+ 160.00000000f, 166.46500594f, 172.97373660f, 179.52490559f,
+ 186.11730005f, 192.74977453f, 199.42124551f, 206.13068654f,
+ 212.87712380f, 219.65963219f, 226.47733176f, 233.32938445f,
+ 240.21499122f, 247.13338933f, 254.08384998f, 261.06567603f,
+ 268.07820003f, 275.12078236f, 282.19280949f, 289.29369244f,
+ 296.42286534f, 303.57978409f, 310.76392512f, 317.97478424f,
+ 325.21187564f, 332.47473081f, 339.76289772f, 347.07593991f,
+ 354.41343574f, 361.77497759f, 369.16017124f, 376.56863518f,
+ 384.00000000f, 391.45390785f, 398.93001188f, 406.42797576f,
+ 413.94747321f, 421.48818752f, 429.04981119f, 436.63204548f,
+ 444.23460010f, 451.85719280f, 459.49954906f, 467.16140179f,
+ 474.84249102f, 482.54256363f, 490.26137307f, 497.99867911f,
+ 505.75424759f, 513.52785023f, 521.31926438f, 529.12827280f,
+ 536.95466351f, 544.79822957f, 552.65876890f, 560.53608414f,
+ 568.42998244f, 576.34027536f, 584.26677867f, 592.20931226f,
+ 600.16769996f, 608.14176943f, 616.13135206f, 624.13628279f,
+ 632.15640007f, 640.19154569f, 648.24156472f, 656.30630539f,
+ 664.38561898f, 672.47935976f, 680.58738488f, 688.70955430f,
+ 696.84573069f, 704.99577935f, 713.15956818f, 721.33696754f,
+ 729.52785023f, 737.73209140f, 745.94956849f, 754.18016116f,
+ 762.42375127f, 770.68022275f, 778.94946161f, 787.23135586f,
+ 795.52579543f, 803.83267219f, 812.15187982f, 820.48331383f,
+ 828.82687147f, 837.18245171f, 845.54995518f, 853.92928416f,
+ 862.32034249f, 870.72303558f, 879.13727036f, 887.56295522f,
+ 896.00000000f, 904.44831595f, 912.90781569f, 921.37841320f,
+ 929.86002376f, 938.35256392f, 946.85595152f, 955.37010560f,
+ 963.89494641f, 972.43039537f, 980.97637504f, 989.53280911f,
+ 998.09962237f, 1006.67674069f, 1015.26409097f, 1023.86160116f,
+ 1032.46920021f, 1041.08681805f, 1049.71438560f, 1058.35183469f,
+ 1066.99909811f, 1075.65610955f, 1084.32280357f, 1092.99911564f,
+ 1101.68498204f, 1110.38033993f, 1119.08512727f, 1127.79928282f,
+ 1136.52274614f, 1145.25545758f, 1153.99735821f, 1162.74838989f,
+ 1171.50849518f, 1180.27761738f, 1189.05570047f, 1197.84268914f,
+ 1206.63852876f, 1215.44316535f, 1224.25654560f, 1233.07861684f,
+ 1241.90932703f, 1250.74862473f, 1259.59645914f, 1268.45278005f,
+ 1277.31753781f, 1286.19068338f, 1295.07216828f, 1303.96194457f,
+ 1312.85996488f, 1321.76618236f, 1330.68055071f, 1339.60302413f,
+ 1348.53355734f, 1357.47210556f, 1366.41862452f, 1375.37307041f,
+ 1384.33539991f, 1393.30557020f, 1402.28353887f, 1411.26926400f,
+ 1420.26270412f, 1429.26381818f, 1438.27256558f, 1447.28890615f,
+ 1456.31280014f, 1465.34420819f, 1474.38309138f, 1483.42941118f,
+ 1492.48312945f, 1501.54420843f, 1510.61261078f, 1519.68829949f,
+ 1528.77123795f, 1537.86138993f, 1546.95871952f, 1556.06319119f,
+ 1565.17476976f, 1574.29342040f, 1583.41910860f, 1592.55180020f,
+ 1601.69146137f, 1610.83805860f, 1619.99155871f, 1629.15192882f,
+ 1638.31913637f, 1647.49314911f, 1656.67393509f, 1665.86146266f,
+ 1675.05570047f, 1684.25661744f, 1693.46418280f, 1702.67836605f,
+ 1711.89913698f, 1721.12646563f, 1730.36032233f, 1739.60067768f,
+ 1748.84750254f, 1758.10076802f, 1767.36044551f, 1776.62650662f,
+ 1785.89892323f, 1795.17766747f, 1804.46271172f, 1813.75402857f,
+ 1823.05159087f, 1832.35537170f, 1841.66534438f, 1850.98148244f,
+ 1860.30375965f, 1869.63214999f, 1878.96662767f, 1888.30716711f,
+ 1897.65374295f, 1907.00633003f, 1916.36490342f, 1925.72943838f,
+ 1935.09991037f, 1944.47629506f, 1953.85856831f, 1963.24670620f,
+ 1972.64068498f, 1982.04048108f, 1991.44607117f, 2000.85743204f,
+ 2010.27454072f, 2019.69737440f, 2029.12591044f, 2038.56012640f
+};
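+// (Sanity check for the tables above: kLog2Table[v] == log2(v) and
+// kSLog2Table[v] == v * log2(v); e.g. kSLog2Table[8] == 8 * log2(8) == 24.)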
+
+const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX] = {
+ { 0, 0}, { 0, 0}, { 1, 0}, { 2, 0}, { 3, 0}, { 4, 1}, { 4, 1}, { 5, 1},
+ { 5, 1}, { 6, 2}, { 6, 2}, { 6, 2}, { 6, 2}, { 7, 2}, { 7, 2}, { 7, 2},
+ { 7, 2}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3},
+ { 8, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3},
+ { 9, 3}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},
+ {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},
+ {10, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4},
+ {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4},
+ {11, 4}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
+ {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
+ {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
+ {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
+ {12, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
+ {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
+ {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
+ {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
+ {13, 5}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
+ {14, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
+ {15, 6}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
+ {16, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+ {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
+};
+
+const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX] = {
+ 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126
+};
+
+float VP8LFastSLog2Slow(int v) {
+ assert(v >= LOG_LOOKUP_IDX_MAX);
+ if (v < APPROX_LOG_MAX) {
+ int log_cnt = 0;
+ const float v_f = (float)v;
+ while (v >= LOG_LOOKUP_IDX_MAX) {
+ ++log_cnt;
+ v = v >> 1;
+ }
+ return v_f * (kLog2Table[v] + log_cnt);
+ } else {
+ return (float)(LOG_2_RECIPROCAL * v * log((double)v));
+ }
+}
+
+float VP8LFastLog2Slow(int v) {
+ assert(v >= LOG_LOOKUP_IDX_MAX);
+ if (v < APPROX_LOG_MAX) {
+ int log_cnt = 0;
+ while (v >= LOG_LOOKUP_IDX_MAX) {
+ ++log_cnt;
+ v = v >> 1;
+ }
+ return kLog2Table[v] + log_cnt;
+ } else {
+ return (float)(LOG_2_RECIPROCAL * log((double)v));
+ }
+}
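+
+// Both helpers above rely on the identity log2(v) == log2(v >> shift) + shift:
+// 'v' is shifted down until it fits the 256-entry table and the shift count is
+// added back. E.g. for v == 1000: 1000 >> 2 == 250 < 256, so the result is
+// kLog2Table[250] + 2 (~9.966, exact here since 1000 == 250 * 4; for
+// non-multiples the truncated low bits make this a close approximation only).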
+
+//------------------------------------------------------------------------------
+// Image transforms.
+
+// In-place sum of each component, modulo 256.
+static WEBP_INLINE void AddPixelsEq(uint32_t* a, uint32_t b) {
+ const uint32_t alpha_and_green = (*a & 0xff00ff00u) + (b & 0xff00ff00u);
+ const uint32_t red_and_blue = (*a & 0x00ff00ffu) + (b & 0x00ff00ffu);
+ *a = (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
+}
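+
+// The two masks split the pixel into interleaved 8-bit lanes (alpha/green and
+// red/blue) with an empty byte above each lane, so a per-lane sum of up to
+// 510 carries harmlessly into the empty byte (or out of the word) and is then
+// masked off. E.g. for red 0xf0 + 0x20: 0x00f00000 + 0x00200000 ==
+// 0x01100000, and masking with 0x00ff00ffu leaves the red lane at 0x10, i.e.
+// (0xf0 + 0x20) % 256.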
+
+static WEBP_INLINE uint32_t Average2(uint32_t a0, uint32_t a1) {
+ return (((a0 ^ a1) & 0xfefefefeL) >> 1) + (a0 & a1);
+}
+
+static WEBP_INLINE uint32_t Average3(uint32_t a0, uint32_t a1, uint32_t a2) {
+ return Average2(Average2(a0, a2), a1);
+}
+
+static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1,
+ uint32_t a2, uint32_t a3) {
+ return Average2(Average2(a0, a1), Average2(a2, a3));
+}
+
+static WEBP_INLINE uint32_t Clip255(uint32_t a) {
+ if (a < 256) {
+ return a;
+ }
+  // Return 0 when 'a' is the wrap-around of a negative value,
+  // 255 when 'a' is a positive value greater than 255.
+ return ~a >> 24;
+}
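+
+// The branchless tail works because callers only pass sums/differences of a
+// few 8-bit values, so 'a' is either a small positive number or the unsigned
+// wrap-around of a small negative one. E.g. a == 300 (0x0000012c) gives
+// ~a >> 24 == 0xff == 255, while a == (uint32_t)-5 (0xfffffffb) gives 0.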
+
+static WEBP_INLINE int AddSubtractComponentFull(int a, int b, int c) {
+ return Clip255(a + b - c);
+}
+
+static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1,
+ uint32_t c2) {
+ const int a = AddSubtractComponentFull(c0 >> 24, c1 >> 24, c2 >> 24);
+ const int r = AddSubtractComponentFull((c0 >> 16) & 0xff,
+ (c1 >> 16) & 0xff,
+ (c2 >> 16) & 0xff);
+ const int g = AddSubtractComponentFull((c0 >> 8) & 0xff,
+ (c1 >> 8) & 0xff,
+ (c2 >> 8) & 0xff);
+ const int b = AddSubtractComponentFull(c0 & 0xff, c1 & 0xff, c2 & 0xff);
+ return (a << 24) | (r << 16) | (g << 8) | b;
+}
+
+static WEBP_INLINE int AddSubtractComponentHalf(int a, int b) {
+ return Clip255(a + (a - b) / 2);
+}
+
+static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1,
+ uint32_t c2) {
+ const uint32_t ave = Average2(c0, c1);
+ const int a = AddSubtractComponentHalf(ave >> 24, c2 >> 24);
+ const int r = AddSubtractComponentHalf((ave >> 16) & 0xff, (c2 >> 16) & 0xff);
+ const int g = AddSubtractComponentHalf((ave >> 8) & 0xff, (c2 >> 8) & 0xff);
+ const int b = AddSubtractComponentHalf((ave >> 0) & 0xff, (c2 >> 0) & 0xff);
+ return (a << 24) | (r << 16) | (g << 8) | b;
+}
+
+static WEBP_INLINE int Sub3(int a, int b, int c) {
+ const int pb = b - c;
+ const int pa = a - c;
+ return abs(pb) - abs(pa);
+}
+
+static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
+ const int pa_minus_pb =
+ Sub3((a >> 24) , (b >> 24) , (c >> 24) ) +
+ Sub3((a >> 16) & 0xff, (b >> 16) & 0xff, (c >> 16) & 0xff) +
+ Sub3((a >> 8) & 0xff, (b >> 8) & 0xff, (c >> 8) & 0xff) +
+ Sub3((a ) & 0xff, (b ) & 0xff, (c ) & 0xff);
+ return (pa_minus_pb <= 0) ? a : b;
+}
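+
+// Select() realizes the gradient check of the VP8L 'Select' predictor without
+// computing the estimate p = a + b - c: per channel, |p - a| == |b - c| and
+// |p - b| == |a - c|, so summing Sub3() over the four channels tells which of
+// 'a' or 'b' lies closer to the estimate.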
+
+//------------------------------------------------------------------------------
+// Predictors
+
+static uint32_t Predictor0(uint32_t left, const uint32_t* const top) {
+ (void)top;
+ (void)left;
+ return ARGB_BLACK;
+}
+static uint32_t Predictor1(uint32_t left, const uint32_t* const top) {
+ (void)top;
+ return left;
+}
+static uint32_t Predictor2(uint32_t left, const uint32_t* const top) {
+ (void)left;
+ return top[0];
+}
+static uint32_t Predictor3(uint32_t left, const uint32_t* const top) {
+ (void)left;
+ return top[1];
+}
+static uint32_t Predictor4(uint32_t left, const uint32_t* const top) {
+ (void)left;
+ return top[-1];
+}
+static uint32_t Predictor5(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = Average3(left, top[0], top[1]);
+ return pred;
+}
+static uint32_t Predictor6(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = Average2(left, top[-1]);
+ return pred;
+}
+static uint32_t Predictor7(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = Average2(left, top[0]);
+ return pred;
+}
+static uint32_t Predictor8(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = Average2(top[-1], top[0]);
+ (void)left;
+ return pred;
+}
+static uint32_t Predictor9(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = Average2(top[0], top[1]);
+ (void)left;
+ return pred;
+}
+static uint32_t Predictor10(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = Average4(left, top[-1], top[0], top[1]);
+ return pred;
+}
+static uint32_t Predictor11(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = VP8LSelect(top[0], left, top[-1]);
+ return pred;
+}
+static uint32_t Predictor12(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = VP8LClampedAddSubtractFull(left, top[0], top[-1]);
+ return pred;
+}
+static uint32_t Predictor13(uint32_t left, const uint32_t* const top) {
+ const uint32_t pred = VP8LClampedAddSubtractHalf(left, top[0], top[-1]);
+ return pred;
+}
+
+// TODO(vikasa): Export the predictor array, to allow SSE2 variants.
+typedef uint32_t (*PredictorFunc)(uint32_t left, const uint32_t* const top);
+static const PredictorFunc kPredictors[16] = {
+ Predictor0, Predictor1, Predictor2, Predictor3,
+ Predictor4, Predictor5, Predictor6, Predictor7,
+ Predictor8, Predictor9, Predictor10, Predictor11,
+ Predictor12, Predictor13,
+ Predictor0, Predictor0 // <- padding security sentinels
+};
+
+// TODO(vikasa): Replace 256 etc with defines.
+static float PredictionCostSpatial(const int* counts,
+ int weight_0, double exp_val) {
+ const int significant_symbols = 16;
+ const double exp_decay_factor = 0.6;
+ double bits = weight_0 * counts[0];
+ int i;
+ for (i = 1; i < significant_symbols; ++i) {
+ bits += exp_val * (counts[i] + counts[256 - i]);
+ exp_val *= exp_decay_factor;
+ }
+ return (float)(-0.1 * bits);
+}
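+
+// The weighting above favors residual histograms clustered around zero:
+// counts at +/-i (bins i and 256 - i of the wrap-around histogram) get the
+// weight exp_val * 0.6^(i-1), and the trailing -0.1 factor turns the weighted
+// sum into a bonus that lowers the overall cost estimate.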
+
+// Computes the combined Shannon entropy for the distributions {X} and {X+Y}.
+static float CombinedShannonEntropy(const int* const X,
+ const int* const Y, int n) {
+ int i;
+ double retval = 0.;
+ int sumX = 0, sumXY = 0;
+ for (i = 0; i < n; ++i) {
+ const int x = X[i];
+ const int xy = X[i] + Y[i];
+ if (x != 0) {
+ sumX += x;
+ retval -= VP8LFastSLog2(x);
+ }
+ if (xy != 0) {
+ sumXY += xy;
+ retval -= VP8LFastSLog2(xy);
+ }
+ }
+ retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
+ return (float)retval;
+}
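+
+// This uses the identity sum(n_i * log2(N / n_i)) == N * log2(N) -
+// sum(n_i * log2(n_i)): the loop accumulates the -n * log2(n) terms and the
+// final line adds the N * log2(N) corrections for both distributions, giving
+// the total coding cost in bits.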
+
+static float PredictionCostSpatialHistogram(int accumulated[4][256],
+ int tile[4][256]) {
+ int i;
+ double retval = 0;
+ for (i = 0; i < 4; ++i) {
+ const double kExpValue = 0.94;
+ retval += PredictionCostSpatial(tile[i], 1, kExpValue);
+ retval += CombinedShannonEntropy(tile[i], accumulated[i], 256);
+ }
+ return (float)retval;
+}
+
+static int GetBestPredictorForTile(int width, int height,
+ int tile_x, int tile_y, int bits,
+ int accumulated[4][256],
+ const uint32_t* const argb_scratch) {
+ const int kNumPredModes = 14;
+ const int col_start = tile_x << bits;
+ const int row_start = tile_y << bits;
+ const int tile_size = 1 << bits;
+ const int ymax = (tile_size <= height - row_start) ?
+ tile_size : height - row_start;
+ const int xmax = (tile_size <= width - col_start) ?
+ tile_size : width - col_start;
+ int histo[4][256];
+ float best_diff = MAX_DIFF_COST;
+ int best_mode = 0;
+
+ int mode;
+ for (mode = 0; mode < kNumPredModes; ++mode) {
+ const uint32_t* current_row = argb_scratch;
+ const PredictorFunc pred_func = kPredictors[mode];
+ float cur_diff;
+ int y;
+ memset(&histo[0][0], 0, sizeof(histo));
+ for (y = 0; y < ymax; ++y) {
+ int x;
+ const int row = row_start + y;
+ const uint32_t* const upper_row = current_row;
+ current_row = upper_row + width;
+ for (x = 0; x < xmax; ++x) {
+ const int col = col_start + x;
+ uint32_t predict;
+ uint32_t predict_diff;
+ if (row == 0) {
+ predict = (col == 0) ? ARGB_BLACK : current_row[col - 1]; // Left.
+ } else if (col == 0) {
+ predict = upper_row[col]; // Top.
+ } else {
+ predict = pred_func(current_row[col - 1], upper_row + col);
+ }
+ predict_diff = VP8LSubPixels(current_row[col], predict);
+ ++histo[0][predict_diff >> 24];
+ ++histo[1][((predict_diff >> 16) & 0xff)];
+ ++histo[2][((predict_diff >> 8) & 0xff)];
+ ++histo[3][(predict_diff & 0xff)];
+ }
+ }
+ cur_diff = PredictionCostSpatialHistogram(accumulated, histo);
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_mode = mode;
+ }
+ }
+
+ return best_mode;
+}
+
+static void CopyTileWithPrediction(int width, int height,
+ int tile_x, int tile_y, int bits, int mode,
+ const uint32_t* const argb_scratch,
+ uint32_t* const argb) {
+ const int col_start = tile_x << bits;
+ const int row_start = tile_y << bits;
+ const int tile_size = 1 << bits;
+ const int ymax = (tile_size <= height - row_start) ?
+ tile_size : height - row_start;
+ const int xmax = (tile_size <= width - col_start) ?
+ tile_size : width - col_start;
+ const PredictorFunc pred_func = kPredictors[mode];
+ const uint32_t* current_row = argb_scratch;
+
+ int y;
+ for (y = 0; y < ymax; ++y) {
+ int x;
+ const int row = row_start + y;
+ const uint32_t* const upper_row = current_row;
+ current_row = upper_row + width;
+ for (x = 0; x < xmax; ++x) {
+ const int col = col_start + x;
+ const int pix = row * width + col;
+ uint32_t predict;
+ if (row == 0) {
+ predict = (col == 0) ? ARGB_BLACK : current_row[col - 1]; // Left.
+ } else if (col == 0) {
+ predict = upper_row[col]; // Top.
+ } else {
+ predict = pred_func(current_row[col - 1], upper_row + col);
+ }
+ argb[pix] = VP8LSubPixels(current_row[col], predict);
+ }
+ }
+}
+
+void VP8LResidualImage(int width, int height, int bits,
+ uint32_t* const argb, uint32_t* const argb_scratch,
+ uint32_t* const image) {
+ const int max_tile_size = 1 << bits;
+ const int tiles_per_row = VP8LSubSampleSize(width, bits);
+ const int tiles_per_col = VP8LSubSampleSize(height, bits);
+ uint32_t* const upper_row = argb_scratch;
+ uint32_t* const current_tile_rows = argb_scratch + width;
+ int tile_y;
+ int histo[4][256];
+ memset(histo, 0, sizeof(histo));
+ for (tile_y = 0; tile_y < tiles_per_col; ++tile_y) {
+ const int tile_y_offset = tile_y * max_tile_size;
+ const int this_tile_height =
+ (tile_y < tiles_per_col - 1) ? max_tile_size : height - tile_y_offset;
+ int tile_x;
+ if (tile_y > 0) {
+ memcpy(upper_row, current_tile_rows + (max_tile_size - 1) * width,
+ width * sizeof(*upper_row));
+ }
+ memcpy(current_tile_rows, &argb[tile_y_offset * width],
+ this_tile_height * width * sizeof(*current_tile_rows));
+ for (tile_x = 0; tile_x < tiles_per_row; ++tile_x) {
+ int pred;
+ int y;
+ const int tile_x_offset = tile_x * max_tile_size;
+ int all_x_max = tile_x_offset + max_tile_size;
+ if (all_x_max > width) {
+ all_x_max = width;
+ }
+ pred = GetBestPredictorForTile(width, height, tile_x, tile_y, bits, histo,
+ argb_scratch);
+ image[tile_y * tiles_per_row + tile_x] = 0xff000000u | (pred << 8);
+ CopyTileWithPrediction(width, height, tile_x, tile_y, bits, pred,
+ argb_scratch, argb);
+ for (y = 0; y < max_tile_size; ++y) {
+ int ix;
+ int all_x;
+ int all_y = tile_y_offset + y;
+ if (all_y >= height) {
+ break;
+ }
+ ix = all_y * width + tile_x_offset;
+ for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
+ const uint32_t a = argb[ix];
+ ++histo[0][a >> 24];
+ ++histo[1][((a >> 16) & 0xff)];
+ ++histo[2][((a >> 8) & 0xff)];
+ ++histo[3][(a & 0xff)];
+ }
+ }
+ }
+ }
+}
+
+// Inverse prediction.
+static void PredictorInverseTransform(const VP8LTransform* const transform,
+ int y_start, int y_end, uint32_t* data) {
+ const int width = transform->xsize_;
+  if (y_start == 0) {  // The first row uses the Left (mode=1) predictor.
+ int x;
+ const uint32_t pred0 = Predictor0(data[-1], NULL);
+ AddPixelsEq(data, pred0);
+ for (x = 1; x < width; ++x) {
+ const uint32_t pred1 = Predictor1(data[x - 1], NULL);
+ AddPixelsEq(data + x, pred1);
+ }
+ data += width;
+ ++y_start;
+ }
+
+ {
+ int y = y_start;
+ const int mask = (1 << transform->bits_) - 1;
+ const int tiles_per_row = VP8LSubSampleSize(width, transform->bits_);
+ const uint32_t* pred_mode_base =
+ transform->data_ + (y >> transform->bits_) * tiles_per_row;
+
+ while (y < y_end) {
+ int x;
+ const uint32_t pred2 = Predictor2(data[-1], data - width);
+ const uint32_t* pred_mode_src = pred_mode_base;
+ PredictorFunc pred_func;
+
+      // The first pixel of each row uses the Top (mode=2) predictor.
+ AddPixelsEq(data, pred2);
+
+    // ... the rest:
+ pred_func = kPredictors[((*pred_mode_src++) >> 8) & 0xf];
+ for (x = 1; x < width; ++x) {
+ uint32_t pred;
+ if ((x & mask) == 0) { // start of tile. Read predictor function.
+ pred_func = kPredictors[((*pred_mode_src++) >> 8) & 0xf];
+ }
+ pred = pred_func(data[x - 1], data + x - width);
+ AddPixelsEq(data + x, pred);
+ }
+ data += width;
+ ++y;
+ if ((y & mask) == 0) { // Use the same mask, since tiles are squares.
+ pred_mode_base += tiles_per_row;
+ }
+ }
+ }
+}
+
+static void SubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
+ int i = 0;
+ for (; i < num_pixs; ++i) {
+ const uint32_t argb = argb_data[i];
+ const uint32_t green = (argb >> 8) & 0xff;
+ const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff;
+ const uint32_t new_b = ((argb & 0xff) - green) & 0xff;
+ argb_data[i] = (argb & 0xff00ff00) | (new_r << 16) | new_b;
+ }
+}
+
+// Add green to blue and red channels (i.e. perform the inverse transform of
+// 'subtract green').
+static void AddGreenToBlueAndRed(uint32_t* data, const uint32_t* data_end) {
+ while (data < data_end) {
+ const uint32_t argb = *data;
+ const uint32_t green = ((argb >> 8) & 0xff);
+ uint32_t red_blue = (argb & 0x00ff00ffu);
+ red_blue += (green << 16) | green;
+ red_blue &= 0x00ff00ffu;
+ *data++ = (argb & 0xff00ff00u) | red_blue;
+ }
+}
+
+typedef struct {
+ // Note: the members are uint8_t, so that any negative values are
+ // automatically converted to "mod 256" values.
+ uint8_t green_to_red_;
+ uint8_t green_to_blue_;
+ uint8_t red_to_blue_;
+} Multipliers;
+
+static WEBP_INLINE void MultipliersClear(Multipliers* m) {
+ m->green_to_red_ = 0;
+ m->green_to_blue_ = 0;
+ m->red_to_blue_ = 0;
+}
+
+static WEBP_INLINE uint32_t ColorTransformDelta(int8_t color_pred,
+ int8_t color) {
+ return (uint32_t)((int)(color_pred) * color) >> 5;
+}
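+
+// The multipliers are signed 3.5 fixed-point values (steps of 1/32), so e.g.
+// color_pred == 16 scales 'color' by 0.5: (16 * 100) >> 5 == 50. The logical
+// (unsigned) shift of a negative product only corrupts high bits, and since
+// every caller masks the result with 0xff the value stays correct modulo 256.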
+
+static WEBP_INLINE void ColorCodeToMultipliers(uint32_t color_code,
+ Multipliers* const m) {
+ m->green_to_red_ = (color_code >> 0) & 0xff;
+ m->green_to_blue_ = (color_code >> 8) & 0xff;
+ m->red_to_blue_ = (color_code >> 16) & 0xff;
+}
+
+static WEBP_INLINE uint32_t MultipliersToColorCode(Multipliers* const m) {
+ return 0xff000000u |
+ ((uint32_t)(m->red_to_blue_) << 16) |
+ ((uint32_t)(m->green_to_blue_) << 8) |
+ m->green_to_red_;
+}
+
+static WEBP_INLINE uint32_t TransformColor(const Multipliers* const m,
+ uint32_t argb, int inverse) {
+ const uint32_t green = argb >> 8;
+ const uint32_t red = argb >> 16;
+ uint32_t new_red = red;
+ uint32_t new_blue = argb;
+
+ if (inverse) {
+ new_red += ColorTransformDelta(m->green_to_red_, green);
+ new_red &= 0xff;
+ new_blue += ColorTransformDelta(m->green_to_blue_, green);
+ new_blue += ColorTransformDelta(m->red_to_blue_, new_red);
+ new_blue &= 0xff;
+ } else {
+ new_red -= ColorTransformDelta(m->green_to_red_, green);
+ new_red &= 0xff;
+ new_blue -= ColorTransformDelta(m->green_to_blue_, green);
+ new_blue -= ColorTransformDelta(m->red_to_blue_, red);
+ new_blue &= 0xff;
+ }
+ return (argb & 0xff00ff00u) | (new_red << 16) | (new_blue);
+}
+
+static WEBP_INLINE uint8_t TransformColorRed(uint8_t green_to_red,
+ uint32_t argb) {
+ const uint32_t green = argb >> 8;
+ uint32_t new_red = argb >> 16;
+ new_red -= ColorTransformDelta(green_to_red, green);
+ return (new_red & 0xff);
+}
+
+static WEBP_INLINE uint8_t TransformColorBlue(uint8_t green_to_blue,
+ uint8_t red_to_blue,
+ uint32_t argb) {
+ const uint32_t green = argb >> 8;
+ const uint32_t red = argb >> 16;
+ uint8_t new_blue = argb;
+ new_blue -= ColorTransformDelta(green_to_blue, green);
+ new_blue -= ColorTransformDelta(red_to_blue, red);
+ return (new_blue & 0xff);
+}
+
+static WEBP_INLINE int SkipRepeatedPixels(const uint32_t* const argb,
+ int ix, int xsize) {
+ const uint32_t v = argb[ix];
+ if (ix >= xsize + 3) {
+ if (v == argb[ix - xsize] &&
+ argb[ix - 1] == argb[ix - xsize - 1] &&
+ argb[ix - 2] == argb[ix - xsize - 2] &&
+ argb[ix - 3] == argb[ix - xsize - 3]) {
+ return 1;
+ }
+ return v == argb[ix - 3] && v == argb[ix - 2] && v == argb[ix - 1];
+ } else if (ix >= 3) {
+ return v == argb[ix - 3] && v == argb[ix - 2] && v == argb[ix - 1];
+ }
+ return 0;
+}
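+
+// Pixels that merely repeat their left neighbors, or the pixels directly
+// above, are excluded from the histograms below: the later backward-reference
+// pass encodes such runs as copies, so they should not bias the
+// color-transform search (see the matching checks in
+// VP8LColorSpaceTransform()).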
+
+static float PredictionCostCrossColor(const int accumulated[256],
+ const int counts[256]) {
+ // Favor low entropy, locally and globally.
+ // Favor small absolute values for PredictionCostSpatial
+ static const double kExpValue = 2.4;
+ return CombinedShannonEntropy(counts, accumulated, 256) +
+ PredictionCostSpatial(counts, 3, kExpValue);
+}
+
+static Multipliers GetBestColorTransformForTile(
+ int tile_x, int tile_y, int bits,
+ Multipliers prevX,
+ Multipliers prevY,
+ int step, int xsize, int ysize,
+ int* accumulated_red_histo,
+ int* accumulated_blue_histo,
+ const uint32_t* const argb) {
+ float best_diff = MAX_DIFF_COST;
+ float cur_diff;
+ const int halfstep = step / 2;
+ const int max_tile_size = 1 << bits;
+ const int tile_y_offset = tile_y * max_tile_size;
+ const int tile_x_offset = tile_x * max_tile_size;
+ int green_to_red;
+ int green_to_blue;
+ int red_to_blue;
+ int all_x_max = tile_x_offset + max_tile_size;
+ int all_y_max = tile_y_offset + max_tile_size;
+ Multipliers best_tx;
+ MultipliersClear(&best_tx);
+ if (all_x_max > xsize) {
+ all_x_max = xsize;
+ }
+ if (all_y_max > ysize) {
+ all_y_max = ysize;
+ }
+
+ for (green_to_red = -64; green_to_red <= 64; green_to_red += halfstep) {
+ int histo[256] = { 0 };
+ int all_y;
+
+ for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) {
+ int ix = all_y * xsize + tile_x_offset;
+ int all_x;
+ for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
+ if (SkipRepeatedPixels(argb, ix, xsize)) {
+ continue;
+ }
+ ++histo[TransformColorRed(green_to_red, argb[ix])]; // red.
+ }
+ }
+ cur_diff = PredictionCostCrossColor(&accumulated_red_histo[0], &histo[0]);
+ if ((uint8_t)green_to_red == prevX.green_to_red_) {
+ cur_diff -= 3; // favor keeping the areas locally similar
+ }
+ if ((uint8_t)green_to_red == prevY.green_to_red_) {
+ cur_diff -= 3; // favor keeping the areas locally similar
+ }
+ if (green_to_red == 0) {
+ cur_diff -= 3;
+ }
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_tx.green_to_red_ = green_to_red;
+ }
+ }
+ best_diff = MAX_DIFF_COST;
+ for (green_to_blue = -32; green_to_blue <= 32; green_to_blue += step) {
+ for (red_to_blue = -32; red_to_blue <= 32; red_to_blue += step) {
+ int all_y;
+ int histo[256] = { 0 };
+ for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) {
+ int all_x;
+ int ix = all_y * xsize + tile_x_offset;
+ for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
+ if (SkipRepeatedPixels(argb, ix, xsize)) {
+ continue;
+ }
+ ++histo[TransformColorBlue(green_to_blue, red_to_blue, argb[ix])];
+ }
+ }
+ cur_diff =
+ PredictionCostCrossColor(&accumulated_blue_histo[0], &histo[0]);
+ if ((uint8_t)green_to_blue == prevX.green_to_blue_) {
+ cur_diff -= 3; // favor keeping the areas locally similar
+ }
+ if ((uint8_t)green_to_blue == prevY.green_to_blue_) {
+ cur_diff -= 3; // favor keeping the areas locally similar
+ }
+ if ((uint8_t)red_to_blue == prevX.red_to_blue_) {
+ cur_diff -= 3; // favor keeping the areas locally similar
+ }
+ if ((uint8_t)red_to_blue == prevY.red_to_blue_) {
+ cur_diff -= 3; // favor keeping the areas locally similar
+ }
+ if (green_to_blue == 0) {
+ cur_diff -= 3;
+ }
+ if (red_to_blue == 0) {
+ cur_diff -= 3;
+ }
+ if (cur_diff < best_diff) {
+ best_diff = cur_diff;
+ best_tx.green_to_blue_ = green_to_blue;
+ best_tx.red_to_blue_ = red_to_blue;
+ }
+ }
+ }
+ return best_tx;
+}
+
+static void CopyTileWithColorTransform(int xsize, int ysize,
+ int tile_x, int tile_y, int bits,
+ Multipliers color_transform,
+ uint32_t* const argb) {
+ int y;
+ int xscan = 1 << bits;
+ int yscan = 1 << bits;
+ tile_x <<= bits;
+ tile_y <<= bits;
+ if (xscan > xsize - tile_x) {
+ xscan = xsize - tile_x;
+ }
+ if (yscan > ysize - tile_y) {
+ yscan = ysize - tile_y;
+ }
+ yscan += tile_y;
+ for (y = tile_y; y < yscan; ++y) {
+ int ix = y * xsize + tile_x;
+ const int end_ix = ix + xscan;
+ for (; ix < end_ix; ++ix) {
+ argb[ix] = TransformColor(&color_transform, argb[ix], 0);
+ }
+ }
+}
+
+void VP8LColorSpaceTransform(int width, int height, int bits, int step,
+ uint32_t* const argb, uint32_t* image) {
+ const int max_tile_size = 1 << bits;
+ int tile_xsize = VP8LSubSampleSize(width, bits);
+ int tile_ysize = VP8LSubSampleSize(height, bits);
+ int accumulated_red_histo[256] = { 0 };
+ int accumulated_blue_histo[256] = { 0 };
+ int tile_y;
+ int tile_x;
+ Multipliers prevX;
+ Multipliers prevY;
+ MultipliersClear(&prevY);
+ MultipliersClear(&prevX);
+ for (tile_y = 0; tile_y < tile_ysize; ++tile_y) {
+ for (tile_x = 0; tile_x < tile_xsize; ++tile_x) {
+ Multipliers color_transform;
+ int all_x_max;
+ int y;
+ const int tile_y_offset = tile_y * max_tile_size;
+ const int tile_x_offset = tile_x * max_tile_size;
+ if (tile_y != 0) {
+ ColorCodeToMultipliers(image[tile_y * tile_xsize + tile_x - 1], &prevX);
+ ColorCodeToMultipliers(image[(tile_y - 1) * tile_xsize + tile_x],
+ &prevY);
+ } else if (tile_x != 0) {
+ ColorCodeToMultipliers(image[tile_y * tile_xsize + tile_x - 1], &prevX);
+ }
+ color_transform =
+ GetBestColorTransformForTile(tile_x, tile_y, bits,
+ prevX, prevY,
+ step, width, height,
+ &accumulated_red_histo[0],
+ &accumulated_blue_histo[0],
+ argb);
+ image[tile_y * tile_xsize + tile_x] =
+ MultipliersToColorCode(&color_transform);
+ CopyTileWithColorTransform(width, height, tile_x, tile_y, bits,
+ color_transform, argb);
+
+ // Gather accumulated histogram data.
+ all_x_max = tile_x_offset + max_tile_size;
+ if (all_x_max > width) {
+ all_x_max = width;
+ }
+ for (y = 0; y < max_tile_size; ++y) {
+ int ix;
+ int all_x;
+ int all_y = tile_y_offset + y;
+ if (all_y >= height) {
+ break;
+ }
+ ix = all_y * width + tile_x_offset;
+ for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
+ if (ix >= 2 &&
+ argb[ix] == argb[ix - 2] &&
+ argb[ix] == argb[ix - 1]) {
+ continue; // repeated pixels are handled by backward references
+ }
+ if (ix >= width + 2 &&
+ argb[ix - 2] == argb[ix - width - 2] &&
+ argb[ix - 1] == argb[ix - width - 1] &&
+ argb[ix] == argb[ix - width]) {
+ continue; // repeated pixels are handled by backward references
+ }
+ ++accumulated_red_histo[(argb[ix] >> 16) & 0xff];
+ ++accumulated_blue_histo[argb[ix] & 0xff];
+ }
+ }
+ }
+ }
+}
+
+// Color space inverse transform.
+static void ColorSpaceInverseTransform(const VP8LTransform* const transform,
+ int y_start, int y_end, uint32_t* data) {
+ const int width = transform->xsize_;
+ const int mask = (1 << transform->bits_) - 1;
+ const int tiles_per_row = VP8LSubSampleSize(width, transform->bits_);
+ int y = y_start;
+ const uint32_t* pred_row =
+ transform->data_ + (y >> transform->bits_) * tiles_per_row;
+
+ while (y < y_end) {
+ const uint32_t* pred = pred_row;
+ Multipliers m = { 0, 0, 0 };
+ int x;
+
+ for (x = 0; x < width; ++x) {
+ if ((x & mask) == 0) ColorCodeToMultipliers(*pred++, &m);
+ data[x] = TransformColor(&m, data[x], 1);
+ }
+ data += width;
+ ++y;
+    if ((y & mask) == 0) pred_row += tiles_per_row;
+ }
+}
+
+// Separate out pixels packed together using pixel-bundling.
+// We define two methods for ARGB data (uint32_t) and alpha-only data (uint8_t).
+#define COLOR_INDEX_INVERSE(FUNC_NAME, TYPE, GET_INDEX, GET_VALUE) \
+void FUNC_NAME(const VP8LTransform* const transform, \
+ int y_start, int y_end, const TYPE* src, TYPE* dst) { \
+ int y; \
+ const int bits_per_pixel = 8 >> transform->bits_; \
+ const int width = transform->xsize_; \
+ const uint32_t* const color_map = transform->data_; \
+ if (bits_per_pixel < 8) { \
+ const int pixels_per_byte = 1 << transform->bits_; \
+ const int count_mask = pixels_per_byte - 1; \
+ const uint32_t bit_mask = (1 << bits_per_pixel) - 1; \
+ for (y = y_start; y < y_end; ++y) { \
+ uint32_t packed_pixels = 0; \
+ int x; \
+ for (x = 0; x < width; ++x) { \
+ /* We need to load fresh 'packed_pixels' once every */ \
+ /* 'pixels_per_byte' increments of x. Fortunately, pixels_per_byte */ \
+ /* is a power of 2, so can just use a mask for that, instead of */ \
+ /* decrementing a counter. */ \
+ if ((x & count_mask) == 0) packed_pixels = GET_INDEX(*src++); \
+ *dst++ = GET_VALUE(color_map[packed_pixels & bit_mask]); \
+ packed_pixels >>= bits_per_pixel; \
+ } \
+ } \
+ } else { \
+ for (y = y_start; y < y_end; ++y) { \
+ int x; \
+ for (x = 0; x < width; ++x) { \
+ *dst++ = GET_VALUE(color_map[GET_INDEX(*src++)]); \
+ } \
+ } \
+ } \
+}
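+
+// Example: with transform->bits_ == 2, bits_per_pixel == 2 and four indices
+// are packed per source pixel (in the green channel for ARGB data, in the raw
+// byte for alpha data). A packed value of 0xe4 (binary 11100100) expands,
+// least-significant bits first, to the indices 0, 1, 2, 3.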
+
+static WEBP_INLINE uint32_t GetARGBIndex(uint32_t idx) {
+ return (idx >> 8) & 0xff;
+}
+
+static WEBP_INLINE uint8_t GetAlphaIndex(uint8_t idx) {
+ return idx;
+}
+
+static WEBP_INLINE uint32_t GetARGBValue(uint32_t val) {
+ return val;
+}
+
+static WEBP_INLINE uint8_t GetAlphaValue(uint32_t val) {
+ return (val >> 8) & 0xff;
+}
+
+static COLOR_INDEX_INVERSE(ColorIndexInverseTransform, uint32_t, GetARGBIndex,
+ GetARGBValue)
+COLOR_INDEX_INVERSE(VP8LColorIndexInverseTransformAlpha, uint8_t, GetAlphaIndex,
+ GetAlphaValue)
+
+#undef COLOR_INDEX_INVERSE
+
+void VP8LInverseTransform(const VP8LTransform* const transform,
+ int row_start, int row_end,
+ const uint32_t* const in, uint32_t* const out) {
+ const int width = transform->xsize_;
+ assert(row_start < row_end);
+ assert(row_end <= transform->ysize_);
+ switch (transform->type_) {
+ case SUBTRACT_GREEN:
+ VP8LAddGreenToBlueAndRed(out, out + (row_end - row_start) * width);
+ break;
+ case PREDICTOR_TRANSFORM:
+ PredictorInverseTransform(transform, row_start, row_end, out);
+ if (row_end != transform->ysize_) {
+ // The last predicted row in this iteration will be the top-pred row
+ // for the first row in next iteration.
+ memcpy(out - width, out + (row_end - row_start - 1) * width,
+ width * sizeof(*out));
+ }
+ break;
+ case CROSS_COLOR_TRANSFORM:
+ ColorSpaceInverseTransform(transform, row_start, row_end, out);
+ break;
+ case COLOR_INDEXING_TRANSFORM:
+ if (in == out && transform->bits_ > 0) {
+        // Move the packed pixels to the end of the unpacked region, so that
+        // unpacking can occur seamlessly.
+ // Also, note that this is the only transform that applies on
+ // the effective width of VP8LSubSampleSize(xsize_, bits_). All other
+ // transforms work on effective width of xsize_.
+ const int out_stride = (row_end - row_start) * width;
+ const int in_stride = (row_end - row_start) *
+ VP8LSubSampleSize(transform->xsize_, transform->bits_);
+ uint32_t* const src = out + out_stride - in_stride;
+ memmove(src, out, in_stride * sizeof(*src));
+ ColorIndexInverseTransform(transform, row_start, row_end, src, out);
+ } else {
+ ColorIndexInverseTransform(transform, row_start, row_end, in, out);
+ }
+ break;
+ }
+}
+
+//------------------------------------------------------------------------------
+// Color space conversion.
+
+static int is_big_endian(void) {
+ static const union {
+ uint16_t w;
+ uint8_t b[2];
+ } tmp = { 1 };
+ return (tmp.b[0] != 1);
+}
+
+static void ConvertBGRAToRGB(const uint32_t* src,
+ int num_pixels, uint8_t* dst) {
+ const uint32_t* const src_end = src + num_pixels;
+ while (src < src_end) {
+ const uint32_t argb = *src++;
+ *dst++ = (argb >> 16) & 0xff;
+ *dst++ = (argb >> 8) & 0xff;
+ *dst++ = (argb >> 0) & 0xff;
+ }
+}
+
+static void ConvertBGRAToRGBA(const uint32_t* src,
+ int num_pixels, uint8_t* dst) {
+ const uint32_t* const src_end = src + num_pixels;
+ while (src < src_end) {
+ const uint32_t argb = *src++;
+ *dst++ = (argb >> 16) & 0xff;
+ *dst++ = (argb >> 8) & 0xff;
+ *dst++ = (argb >> 0) & 0xff;
+ *dst++ = (argb >> 24) & 0xff;
+ }
+}
+
+static void ConvertBGRAToRGBA4444(const uint32_t* src,
+ int num_pixels, uint8_t* dst) {
+ const uint32_t* const src_end = src + num_pixels;
+ while (src < src_end) {
+ const uint32_t argb = *src++;
+ const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ const uint8_t ba = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
+#ifdef WEBP_SWAP_16BIT_CSP
+ *dst++ = ba;
+ *dst++ = rg;
+#else
+ *dst++ = rg;
+ *dst++ = ba;
+#endif
+ }
+}
+
+static void ConvertBGRAToRGB565(const uint32_t* src,
+ int num_pixels, uint8_t* dst) {
+ const uint32_t* const src_end = src + num_pixels;
+ while (src < src_end) {
+ const uint32_t argb = *src++;
+ const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
+#ifdef WEBP_SWAP_16BIT_CSP
+ *dst++ = gb;
+ *dst++ = rg;
+#else
+ *dst++ = rg;
+ *dst++ = gb;
+#endif
+ }
+}
+
+static void ConvertBGRAToBGR(const uint32_t* src,
+ int num_pixels, uint8_t* dst) {
+ const uint32_t* const src_end = src + num_pixels;
+ while (src < src_end) {
+ const uint32_t argb = *src++;
+ *dst++ = (argb >> 0) & 0xff;
+ *dst++ = (argb >> 8) & 0xff;
+ *dst++ = (argb >> 16) & 0xff;
+ }
+}
+
+static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst,
+ int swap_on_big_endian) {
+ if (is_big_endian() == swap_on_big_endian) {
+ const uint32_t* const src_end = src + num_pixels;
+ while (src < src_end) {
+ uint32_t argb = *src++;
+
+#if !defined(__BIG_ENDIAN__)
+#if !defined(WEBP_REFERENCE_IMPLEMENTATION)
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ volatile("bswap %0" : "=r"(argb) : "0"(argb));
+ *(uint32_t*)dst = argb;
+#elif defined(_MSC_VER)
+ argb = _byteswap_ulong(argb);
+ *(uint32_t*)dst = argb;
+#else
+ dst[0] = (argb >> 24) & 0xff;
+ dst[1] = (argb >> 16) & 0xff;
+ dst[2] = (argb >> 8) & 0xff;
+ dst[3] = (argb >> 0) & 0xff;
+#endif
+#else // WEBP_REFERENCE_IMPLEMENTATION
+ dst[0] = (argb >> 24) & 0xff;
+ dst[1] = (argb >> 16) & 0xff;
+ dst[2] = (argb >> 8) & 0xff;
+ dst[3] = (argb >> 0) & 0xff;
+#endif
+#else // __BIG_ENDIAN__
+ dst[0] = (argb >> 0) & 0xff;
+ dst[1] = (argb >> 8) & 0xff;
+ dst[2] = (argb >> 16) & 0xff;
+ dst[3] = (argb >> 24) & 0xff;
+#endif
+ dst += sizeof(argb);
+ }
+ } else {
+ memcpy(dst, src, num_pixels * sizeof(*src));
+ }
+}
+
+void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels,
+ WEBP_CSP_MODE out_colorspace, uint8_t* const rgba) {
+ switch (out_colorspace) {
+ case MODE_RGB:
+ ConvertBGRAToRGB(in_data, num_pixels, rgba);
+ break;
+ case MODE_RGBA:
+ ConvertBGRAToRGBA(in_data, num_pixels, rgba);
+ break;
+ case MODE_rgbA:
+ ConvertBGRAToRGBA(in_data, num_pixels, rgba);
+ WebPApplyAlphaMultiply(rgba, 0, num_pixels, 1, 0);
+ break;
+ case MODE_BGR:
+ ConvertBGRAToBGR(in_data, num_pixels, rgba);
+ break;
+ case MODE_BGRA:
+ CopyOrSwap(in_data, num_pixels, rgba, 1);
+ break;
+ case MODE_bgrA:
+ CopyOrSwap(in_data, num_pixels, rgba, 1);
+ WebPApplyAlphaMultiply(rgba, 0, num_pixels, 1, 0);
+ break;
+ case MODE_ARGB:
+ CopyOrSwap(in_data, num_pixels, rgba, 0);
+ break;
+ case MODE_Argb:
+ CopyOrSwap(in_data, num_pixels, rgba, 0);
+ WebPApplyAlphaMultiply(rgba, 1, num_pixels, 1, 0);
+ break;
+ case MODE_RGBA_4444:
+ ConvertBGRAToRGBA4444(in_data, num_pixels, rgba);
+ break;
+ case MODE_rgbA_4444:
+ ConvertBGRAToRGBA4444(in_data, num_pixels, rgba);
+ WebPApplyAlphaMultiply4444(rgba, num_pixels, 1, 0);
+ break;
+ case MODE_RGB_565:
+ ConvertBGRAToRGB565(in_data, num_pixels, rgba);
+ break;
+ default:
+ assert(0); // Code flow should not reach here.
+ }
+}
+
+// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
+void VP8LBundleColorMap(const uint8_t* const row, int width,
+ int xbits, uint32_t* const dst) {
+ int x;
+ if (xbits > 0) {
+ const int bit_depth = 1 << (3 - xbits);
+ const int mask = (1 << xbits) - 1;
+ uint32_t code = 0xff000000;
+ for (x = 0; x < width; ++x) {
+ const int xsub = x & mask;
+ if (xsub == 0) {
+ code = 0xff000000;
+ }
+ code |= row[x] << (8 + bit_depth * xsub);
+ dst[x >> xbits] = code;
+ }
+ } else {
+ for (x = 0; x < width; ++x) dst[x] = 0xff000000 | (row[x] << 8);
+ }
+}
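+
+// VP8LBundleColorMap() is the encoder-side counterpart of the color-index
+// unbundling in ColorIndexInverseTransform() above. Example with xbits == 2:
+// bit_depth == 1 << (3 - 2) == 2, so row = {0, 1, 2, 3} packs into
+// dst[0] == 0xff00e400 -- the green channel 0xe4 (binary 11100100) holds the
+// four 2-bit indices, least-significant first.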
+
+//------------------------------------------------------------------------------
+
+// TODO(vikasa): Move the SSE2 functions to lossless_dsp.c (new file), once
+// color-space conversion methods (ConvertFromBGRA) are also updated for SSE2.
+#if defined(WEBP_USE_SSE2)
+static WEBP_INLINE uint32_t ClampedAddSubtractFullSSE2(uint32_t c0, uint32_t c1,
+ uint32_t c2) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
+ const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
+ const __m128i C2 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
+ const __m128i V1 = _mm_add_epi16(C0, C1);
+ const __m128i V2 = _mm_sub_epi16(V1, C2);
+ const __m128i b = _mm_packus_epi16(V2, V2);
+ const uint32_t output = _mm_cvtsi128_si32(b);
+ return output;
+}
+
+static WEBP_INLINE uint32_t ClampedAddSubtractHalfSSE2(uint32_t c0, uint32_t c1,
+ uint32_t c2) {
+ const uint32_t ave = Average2(c0, c1);
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i A0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(ave), zero);
+ const __m128i B0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
+ const __m128i A1 = _mm_sub_epi16(A0, B0);
+ const __m128i BgtA = _mm_cmpgt_epi16(B0, A0);
+ const __m128i A2 = _mm_sub_epi16(A1, BgtA);
+ const __m128i A3 = _mm_srai_epi16(A2, 1);
+ const __m128i A4 = _mm_add_epi16(A0, A3);
+ const __m128i A5 = _mm_packus_epi16(A4, A4);
+ const uint32_t output = _mm_cvtsi128_si32(A5);
+ return output;
+}
+
+static WEBP_INLINE uint32_t SelectSSE2(uint32_t a, uint32_t b, uint32_t c) {
+ int pa_minus_pb;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i A0 = _mm_cvtsi32_si128(a);
+ const __m128i B0 = _mm_cvtsi32_si128(b);
+ const __m128i C0 = _mm_cvtsi32_si128(c);
+ const __m128i AC0 = _mm_subs_epu8(A0, C0);
+ const __m128i CA0 = _mm_subs_epu8(C0, A0);
+ const __m128i BC0 = _mm_subs_epu8(B0, C0);
+ const __m128i CB0 = _mm_subs_epu8(C0, B0);
+ const __m128i AC = _mm_or_si128(AC0, CA0);
+ const __m128i BC = _mm_or_si128(BC0, CB0);
+ const __m128i pa = _mm_unpacklo_epi8(AC, zero); // |a - c|
+ const __m128i pb = _mm_unpacklo_epi8(BC, zero); // |b - c|
+ const __m128i diff = _mm_sub_epi16(pb, pa);
+ {
+ int16_t out[8];
+ _mm_storeu_si128((__m128i*)out, diff);
+ pa_minus_pb = out[0] + out[1] + out[2] + out[3];
+ }
+ return (pa_minus_pb <= 0) ? a : b;
+}
+
+static void SubtractGreenFromBlueAndRedSSE2(uint32_t* argb_data, int num_pixs) {
+ int i = 0;
+ const __m128i mask = _mm_set1_epi32(0x0000ff00);
+ for (; i + 4 < num_pixs; i += 4) {
+ const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
+ const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
+ const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
+ const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
+ const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
+ const __m128i out = _mm_sub_epi8(in, in_0g0g);
+ _mm_storeu_si128((__m128i*)&argb_data[i], out);
+ }
+  // Fall through and finish off with plain C.
+ for (; i < num_pixs; ++i) {
+ const uint32_t argb = argb_data[i];
+ const uint32_t green = (argb >> 8) & 0xff;
+ const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff;
+ const uint32_t new_b = ((argb & 0xff) - green) & 0xff;
+ argb_data[i] = (argb & 0xff00ff00) | (new_r << 16) | new_b;
+ }
+}
+
+static void AddGreenToBlueAndRedSSE2(uint32_t* data, const uint32_t* data_end) {
+ const __m128i mask = _mm_set1_epi32(0x0000ff00);
+ for (; data + 4 < data_end; data += 4) {
+ const __m128i in = _mm_loadu_si128((__m128i*)data);
+ const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
+ const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
+ const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
+ const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
+ const __m128i out = _mm_add_epi8(in, in_0g0g);
+ _mm_storeu_si128((__m128i*)data, out);
+ }
+  // Fall through and finish off with plain C.
+ while (data < data_end) {
+ const uint32_t argb = *data;
+ const uint32_t green = ((argb >> 8) & 0xff);
+ uint32_t red_blue = (argb & 0x00ff00ffu);
+ red_blue += (green << 16) | green;
+ red_blue &= 0x00ff00ffu;
+ *data++ = (argb & 0xff00ff00u) | red_blue;
+ }
+}
+
+extern void VP8LDspInitSSE2(void);
+
+void VP8LDspInitSSE2(void) {
+ VP8LClampedAddSubtractFull = ClampedAddSubtractFullSSE2;
+ VP8LClampedAddSubtractHalf = ClampedAddSubtractHalfSSE2;
+ VP8LSelect = SelectSSE2;
+ VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRedSSE2;
+ VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRedSSE2;
+}
+#endif
+//------------------------------------------------------------------------------
+
+VP8LPredClampedAddSubFunc VP8LClampedAddSubtractFull;
+VP8LPredClampedAddSubFunc VP8LClampedAddSubtractHalf;
+VP8LPredSelectFunc VP8LSelect;
+VP8LSubtractGreenFromBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
+VP8LAddGreenToBlueAndRedFunc VP8LAddGreenToBlueAndRed;
+
+void VP8LDspInit(void) {
+ VP8LClampedAddSubtractFull = ClampedAddSubtractFull;
+ VP8LClampedAddSubtractHalf = ClampedAddSubtractHalf;
+ VP8LSelect = Select;
+ VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed;
+ VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed;
+
+  // If a CPU-detection hook is available, use it to substitute faster
+  // versions for some of the pointers above.
+ if (VP8GetCPUInfo != NULL) {
+#if defined(WEBP_USE_SSE2)
+ if (VP8GetCPUInfo(kSSE2)) {
+ VP8LDspInitSSE2();
+ }
+#endif
+ }
+}
+
+//------------------------------------------------------------------------------
+
diff --git a/drivers/webp/dsp/lossless.h b/drivers/webp/dsp/lossless.h
new file mode 100644
index 000000000..0f1d44200
--- /dev/null
+++ b/drivers/webp/dsp/lossless.h
@@ -0,0 +1,220 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Image transforms and color space conversion methods for lossless decoder.
+//
+// Authors: Vikas Arora (vikaas.arora@gmail.com)
+// Jyrki Alakuijala (jyrki@google.com)
+
+#ifndef WEBP_DSP_LOSSLESS_H_
+#define WEBP_DSP_LOSSLESS_H_
+
+#include "../webp/types.h"
+#include "../webp/decode.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//------------------------------------------------------------------------------
+//
+
+typedef uint32_t (*VP8LPredClampedAddSubFunc)(uint32_t c0, uint32_t c1,
+ uint32_t c2);
+typedef uint32_t (*VP8LPredSelectFunc)(uint32_t c0, uint32_t c1, uint32_t c2);
+typedef void (*VP8LSubtractGreenFromBlueAndRedFunc)(uint32_t* argb_data,
+ int num_pixs);
+typedef void (*VP8LAddGreenToBlueAndRedFunc)(uint32_t* data_start,
+ const uint32_t* data_end);
+
+extern VP8LPredClampedAddSubFunc VP8LClampedAddSubtractFull;
+extern VP8LPredClampedAddSubFunc VP8LClampedAddSubtractHalf;
+extern VP8LPredSelectFunc VP8LSelect;
+extern VP8LSubtractGreenFromBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
+extern VP8LAddGreenToBlueAndRedFunc VP8LAddGreenToBlueAndRed;
+
+// Must be called before calling any of the above methods.
+void VP8LDspInit(void);
+
+//------------------------------------------------------------------------------
+// Image transforms.
+
+struct VP8LTransform; // Defined in dec/vp8li.h.
+
+// Performs the inverse transform of the data, given the transform information
+// and the start and end rows. The transform is applied to the half-open row
+// range [row_start, row_end). The *in and *out pointers refer to the source
+// and destination data respectively, both positioned at row 'row_start'.
+void VP8LInverseTransform(const struct VP8LTransform* const transform,
+ int row_start, int row_end,
+ const uint32_t* const in, uint32_t* const out);
+
+// Similar to the static method ColorIndexInverseTransform() that is part of
+// lossless.c, but used only for alpha decoding. It takes uint8_t (rather than
+// uint32_t) arguments for 'src' and 'dst'.
+void VP8LColorIndexInverseTransformAlpha(
+ const struct VP8LTransform* const transform, int y_start, int y_end,
+ const uint8_t* src, uint8_t* dst);
+
+void VP8LResidualImage(int width, int height, int bits,
+ uint32_t* const argb, uint32_t* const argb_scratch,
+ uint32_t* const image);
+
+void VP8LColorSpaceTransform(int width, int height, int bits, int step,
+ uint32_t* const argb, uint32_t* image);
+
+//------------------------------------------------------------------------------
+// Color space conversion.
+
+// Converts from BGRA to other color spaces.
+void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels,
+ WEBP_CSP_MODE out_colorspace, uint8_t* const rgba);
+
+//------------------------------------------------------------------------------
+// Misc methods.
+
+// Computes the sub-sampled size of 'size' when sampling with 'sampling_bits',
+// i.e. ceil(size / 2^sampling_bits).
+static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size,
+ uint32_t sampling_bits) {
+ return (size + (1 << sampling_bits) - 1) >> sampling_bits;
+}
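+
+// E.g. VP8LSubSampleSize(100, 4) == (100 + 15) >> 4 == 7, i.e. ceil(100 / 16):
+// a 100-pixel dimension needs 7 tiles of 16 pixels each.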
+
+// Faster logarithm for integers. Small values use a look-up table.
+#define LOG_LOOKUP_IDX_MAX 256
+extern const float kLog2Table[LOG_LOOKUP_IDX_MAX];
+extern const float kSLog2Table[LOG_LOOKUP_IDX_MAX];
+float VP8LFastLog2Slow(int v);
+float VP8LFastSLog2Slow(int v);
+static WEBP_INLINE float VP8LFastLog2(int v) {
+ return (v < LOG_LOOKUP_IDX_MAX) ? kLog2Table[v] : VP8LFastLog2Slow(v);
+}
+// Fast calculation of v * log2(v) for integer input.
+static WEBP_INLINE float VP8LFastSLog2(int v) {
+ return (v < LOG_LOOKUP_IDX_MAX) ? kSLog2Table[v] : VP8LFastSLog2Slow(v);
+}
+
+// -----------------------------------------------------------------------------
+// PrefixEncode()
+
+// Use GNU builtins where available.
+#if defined(__GNUC__) && \
+ ((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4)
+static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
+ return 31 ^ __builtin_clz(n);
+}
+#elif defined(_MSC_VER) && _MSC_VER > 1310 && \
+ (defined(_M_X64) || defined(_M_IX86))
+#include <intrin.h>
+#pragma intrinsic(_BitScanReverse)
+
+static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
+ unsigned long first_set_bit;
+ _BitScanReverse(&first_set_bit, n);
+ return first_set_bit;
+}
+#else
+// Returns (int)floor(log2(n)). n must be > 0.
+static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
+ int log = 0;
+ uint32_t value = n;
+ int i;
+
+ for (i = 4; i >= 0; --i) {
+ const int shift = (1 << i);
+ const uint32_t x = value >> shift;
+ if (x != 0) {
+ value = x;
+ log += shift;
+ }
+ }
+ return log;
+}
+#endif
+
+static WEBP_INLINE int VP8LBitsLog2Ceiling(uint32_t n) {
+ const int log_floor = BitsLog2Floor(n);
+ if (n == (n & ~(n - 1))) // zero or a power of two.
+ return log_floor;
+ else
+ return log_floor + 1;
+}
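+
+// 'n & ~(n - 1)' isolates the lowest set bit, so the equality holds exactly
+// when n is zero or a power of two. E.g. n == 5: log_floor == 2 and 5 has two
+// bits set, hence the ceiling 3 is returned.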
+
+// Splitting of distance and length codes into prefixes and
+// extra bits. The prefixes are encoded with an entropy code
+// while the extra bits are stored just as normal bits.
+static WEBP_INLINE void VP8LPrefixEncodeBitsNoLUT(int distance, int* const code,
+ int* const extra_bits) {
+ const int highest_bit = BitsLog2Floor(--distance);
+ const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
+ *extra_bits = highest_bit - 1;
+ *code = 2 * highest_bit + second_highest_bit;
+}
+
+static WEBP_INLINE void VP8LPrefixEncodeNoLUT(int distance, int* const code,
+ int* const extra_bits,
+ int* const extra_bits_value) {
+ const int highest_bit = BitsLog2Floor(--distance);
+ const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
+ *extra_bits = highest_bit - 1;
+ *extra_bits_value = distance & ((1 << *extra_bits) - 1);
+ *code = 2 * highest_bit + second_highest_bit;
+}
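+
+// Worked example for VP8LPrefixEncodeNoLUT(): distance == 300 first becomes
+// 299 (binary 100101011), so highest_bit == 8 and second_highest_bit == 0,
+// giving code == 16, extra_bits == 7 and extra_bits_value == 299 & 127 == 43.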
+
+#define PREFIX_LOOKUP_IDX_MAX 512
+typedef struct {
+ int8_t code_;
+ int8_t extra_bits_;
+} VP8LPrefixCode;
+
+// These tables are derived using VP8LPrefixEncodeNoLUT.
+extern const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX];
+extern const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX];
+static WEBP_INLINE void VP8LPrefixEncodeBits(int distance, int* const code,
+ int* const extra_bits) {
+ if (distance < PREFIX_LOOKUP_IDX_MAX) {
+ const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
+ *code = prefix_code.code_;
+ *extra_bits = prefix_code.extra_bits_;
+ } else {
+ VP8LPrefixEncodeBitsNoLUT(distance, code, extra_bits);
+ }
+}
+
+static WEBP_INLINE void VP8LPrefixEncode(int distance, int* const code,
+ int* const extra_bits,
+ int* const extra_bits_value) {
+ if (distance < PREFIX_LOOKUP_IDX_MAX) {
+ const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
+ *code = prefix_code.code_;
+ *extra_bits = prefix_code.extra_bits_;
+ *extra_bits_value = kPrefixEncodeExtraBitsValue[distance];
+ } else {
+ VP8LPrefixEncodeNoLUT(distance, code, extra_bits, extra_bits_value);
+ }
+}
+
+// Difference of each component, modulo 256 (the result is returned rather
+// than applied in place).
+static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
+ const uint32_t alpha_and_green =
+ 0x00ff00ffu + (a & 0xff00ff00u) - (b & 0xff00ff00u);
+ const uint32_t red_and_blue =
+ 0xff00ff00u + (a & 0x00ff00ffu) - (b & 0x00ff00ffu);
+ return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
+}
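+
+// Same lane-splitting trick as the decoder-side pixel addition in lossless.c,
+// but for subtraction: the constants pre-load a 0xff pad above each active
+// lane, so an underflowing lane borrows from the pad (or out of the word)
+// instead of from the neighboring lane. E.g. green 0x10 - 0x20:
+// 0x00ff00ffu + 0x00001000u - 0x00002000u == 0x00fef0ffu; masking with
+// 0xff00ff00u leaves green == 0xf0, i.e. -16 mod 256.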
+
+void VP8LBundleColorMap(const uint8_t* const row, int width,
+ int xbits, uint32_t* const dst);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // WEBP_DSP_LOSSLESS_H_
diff --git a/drivers/webp/dsp/upsampling.c b/drivers/webp/dsp/upsampling.c
new file mode 100644
index 000000000..978e3ce25
--- /dev/null
+++ b/drivers/webp/dsp/upsampling.c
@@ -0,0 +1,366 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// YUV to RGB upsampling functions.
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include "./dsp.h"
+#include "./yuv.h"
+
+#include <assert.h>
+
+//------------------------------------------------------------------------------
+// Fancy upsampler
+
+#ifdef FANCY_UPSAMPLING
+
+// Fancy upsampling functions to convert YUV to RGB
+WebPUpsampleLinePairFunc WebPUpsamplers[MODE_LAST];
+
+// Given samples laid out in a square as:
+// [a b]
+// [c d]
+// we interpolate u/v as:
+// ([9*a + 3*b + 3*c + d 3*a + 9*b + 3*c + d] + [8 8]) / 16
+//   ([3*a +   b + 9*c + 3*d      a + 3*b +   3*c + 9*d] + [8 8]) / 16
+
+// We process u and v together, stashed into a 32-bit word (16 bits each).
+#define LOAD_UV(u, v) ((u) | ((v) << 16))
+
+#define UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
+static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
+ const uint8_t* top_u, const uint8_t* top_v, \
+ const uint8_t* cur_u, const uint8_t* cur_v, \
+ uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
+ int x; \
+ const int last_pixel_pair = (len - 1) >> 1; \
+ uint32_t tl_uv = LOAD_UV(top_u[0], top_v[0]); /* top-left sample */ \
+ uint32_t l_uv = LOAD_UV(cur_u[0], cur_v[0]); /* left-sample */ \
+ assert(top_y != NULL); \
+ { \
+ const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \
+ FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \
+ } \
+ if (bottom_y != NULL) { \
+ const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \
+ FUNC(bottom_y[0], uv0 & 0xff, (uv0 >> 16), bottom_dst); \
+ } \
+ for (x = 1; x <= last_pixel_pair; ++x) { \
+ const uint32_t t_uv = LOAD_UV(top_u[x], top_v[x]); /* top sample */ \
+ const uint32_t uv = LOAD_UV(cur_u[x], cur_v[x]); /* sample */ \
+ /* precompute invariant values associated with first and second diagonals*/\
+ const uint32_t avg = tl_uv + t_uv + l_uv + uv + 0x00080008u; \
+ const uint32_t diag_12 = (avg + 2 * (t_uv + l_uv)) >> 3; \
+ const uint32_t diag_03 = (avg + 2 * (tl_uv + uv)) >> 3; \
+ { \
+ const uint32_t uv0 = (diag_12 + tl_uv) >> 1; \
+ const uint32_t uv1 = (diag_03 + t_uv) >> 1; \
+ FUNC(top_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \
+ top_dst + (2 * x - 1) * XSTEP); \
+ FUNC(top_y[2 * x - 0], uv1 & 0xff, (uv1 >> 16), \
+ top_dst + (2 * x - 0) * XSTEP); \
+ } \
+ if (bottom_y != NULL) { \
+ const uint32_t uv0 = (diag_03 + l_uv) >> 1; \
+ const uint32_t uv1 = (diag_12 + uv) >> 1; \
+ FUNC(bottom_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \
+ bottom_dst + (2 * x - 1) * XSTEP); \
+ FUNC(bottom_y[2 * x + 0], uv1 & 0xff, (uv1 >> 16), \
+ bottom_dst + (2 * x + 0) * XSTEP); \
+ } \
+ tl_uv = t_uv; \
+ l_uv = uv; \
+ } \
+ if (!(len & 1)) { \
+ { \
+ const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \
+ FUNC(top_y[len - 1], uv0 & 0xff, (uv0 >> 16), \
+ top_dst + (len - 1) * XSTEP); \
+ } \
+ if (bottom_y != NULL) { \
+ const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \
+ FUNC(bottom_y[len - 1], uv0 & 0xff, (uv0 >> 16), \
+ bottom_dst + (len - 1) * XSTEP); \
+ } \
+ } \
+}
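+
+// The two diagonals recombine into the [9 3; 3 1]/16 kernel documented above:
+// e.g. for the odd top pixel, uv0 == (diag_12 + tl_uv) >> 1 expands (up to
+// shift truncation) to (9*tl + 3*t + 3*l + uv + 8) / 16, i.e. the 9-3-3-1
+// weights with round-to-nearest.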
+
+// All variants implemented.
+UPSAMPLE_FUNC(UpsampleRgbLinePair, VP8YuvToRgb, 3)
+UPSAMPLE_FUNC(UpsampleBgrLinePair, VP8YuvToBgr, 3)
+UPSAMPLE_FUNC(UpsampleRgbaLinePair, VP8YuvToRgba, 4)
+UPSAMPLE_FUNC(UpsampleBgraLinePair, VP8YuvToBgra, 4)
+UPSAMPLE_FUNC(UpsampleArgbLinePair, VP8YuvToArgb, 4)
+UPSAMPLE_FUNC(UpsampleRgba4444LinePair, VP8YuvToRgba4444, 2)
+UPSAMPLE_FUNC(UpsampleRgb565LinePair, VP8YuvToRgb565, 2)
+
+#undef LOAD_UV
+#undef UPSAMPLE_FUNC
+
+#endif // FANCY_UPSAMPLING
+
+//------------------------------------------------------------------------------
+// simple point-sampling
+
+#define SAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
+static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
+ const uint8_t* u, const uint8_t* v, \
+ uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
+ int i; \
+ for (i = 0; i < len - 1; i += 2) { \
+ FUNC(top_y[0], u[0], v[0], top_dst); \
+ FUNC(top_y[1], u[0], v[0], top_dst + XSTEP); \
+ FUNC(bottom_y[0], u[0], v[0], bottom_dst); \
+ FUNC(bottom_y[1], u[0], v[0], bottom_dst + XSTEP); \
+ top_y += 2; \
+ bottom_y += 2; \
+ u++; \
+ v++; \
+ top_dst += 2 * XSTEP; \
+ bottom_dst += 2 * XSTEP; \
+ } \
+ if (i == len - 1) { /* last one */ \
+ FUNC(top_y[0], u[0], v[0], top_dst); \
+ FUNC(bottom_y[0], u[0], v[0], bottom_dst); \
+ } \
+}
+
+// All variants implemented.
+SAMPLE_FUNC(SampleRgbLinePair, VP8YuvToRgb, 3)
+SAMPLE_FUNC(SampleBgrLinePair, VP8YuvToBgr, 3)
+SAMPLE_FUNC(SampleRgbaLinePair, VP8YuvToRgba, 4)
+SAMPLE_FUNC(SampleBgraLinePair, VP8YuvToBgra, 4)
+SAMPLE_FUNC(SampleArgbLinePair, VP8YuvToArgb, 4)
+SAMPLE_FUNC(SampleRgba4444LinePair, VP8YuvToRgba4444, 2)
+SAMPLE_FUNC(SampleRgb565LinePair, VP8YuvToRgb565, 2)
+
+#undef SAMPLE_FUNC
+
+const WebPSampleLinePairFunc WebPSamplers[MODE_LAST] = {
+ SampleRgbLinePair, // MODE_RGB
+ SampleRgbaLinePair, // MODE_RGBA
+ SampleBgrLinePair, // MODE_BGR
+ SampleBgraLinePair, // MODE_BGRA
+ SampleArgbLinePair, // MODE_ARGB
+ SampleRgba4444LinePair, // MODE_RGBA_4444
+ SampleRgb565LinePair, // MODE_RGB_565
+ SampleRgbaLinePair, // MODE_rgbA
+ SampleBgraLinePair, // MODE_bgrA
+ SampleArgbLinePair, // MODE_Argb
+ SampleRgba4444LinePair // MODE_rgbA_4444
+};
+
+//------------------------------------------------------------------------------
+
+#if !defined(FANCY_UPSAMPLING)
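+// Note: the sampler below is hard-coded for 4-byte output formats, hence
+// the '8 * x' and '+ 4' byte offsets.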
+#define DUAL_SAMPLE_FUNC(FUNC_NAME, FUNC) \
+static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bot_y, \
+ const uint8_t* top_u, const uint8_t* top_v, \
+ const uint8_t* bot_u, const uint8_t* bot_v, \
+ uint8_t* top_dst, uint8_t* bot_dst, int len) { \
+ const int half_len = len >> 1; \
+ int x; \
+ assert(top_dst != NULL); \
+ { \
+ for (x = 0; x < half_len; ++x) { \
+ FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x + 0); \
+ FUNC(top_y[2 * x + 1], top_u[x], top_v[x], top_dst + 8 * x + 4); \
+ } \
+ if (len & 1) FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x); \
+ } \
+ if (bot_dst != NULL) { \
+ for (x = 0; x < half_len; ++x) { \
+ FUNC(bot_y[2 * x + 0], bot_u[x], bot_v[x], bot_dst + 8 * x + 0); \
+ FUNC(bot_y[2 * x + 1], bot_u[x], bot_v[x], bot_dst + 8 * x + 4); \
+ } \
+ if (len & 1) FUNC(bot_y[2 * x + 0], bot_u[x], bot_v[x], bot_dst + 8 * x); \
+ } \
+}
+
+DUAL_SAMPLE_FUNC(DualLineSamplerBGRA, VP8YuvToBgra)
+DUAL_SAMPLE_FUNC(DualLineSamplerARGB, VP8YuvToArgb)
+#undef DUAL_SAMPLE_FUNC
+
+#endif // !FANCY_UPSAMPLING
+
+WebPUpsampleLinePairFunc WebPGetLinePairConverter(int alpha_is_last) {
+ WebPInitUpsamplers();
+ VP8YUVInit();
+#ifdef FANCY_UPSAMPLING
+ return WebPUpsamplers[alpha_is_last ? MODE_BGRA : MODE_ARGB];
+#else
+ return (alpha_is_last ? DualLineSamplerBGRA : DualLineSamplerARGB);
+#endif
+}
+
+//------------------------------------------------------------------------------
+// YUV444 converter
+
+#define YUV444_FUNC(FUNC_NAME, FUNC, XSTEP) \
+static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \
+ uint8_t* dst, int len) { \
+ int i; \
+ for (i = 0; i < len; ++i) FUNC(y[i], u[i], v[i], &dst[i * XSTEP]); \
+}
+
+YUV444_FUNC(Yuv444ToRgb, VP8YuvToRgb, 3)
+YUV444_FUNC(Yuv444ToBgr, VP8YuvToBgr, 3)
+YUV444_FUNC(Yuv444ToRgba, VP8YuvToRgba, 4)
+YUV444_FUNC(Yuv444ToBgra, VP8YuvToBgra, 4)
+YUV444_FUNC(Yuv444ToArgb, VP8YuvToArgb, 4)
+YUV444_FUNC(Yuv444ToRgba4444, VP8YuvToRgba4444, 2)
+YUV444_FUNC(Yuv444ToRgb565, VP8YuvToRgb565, 2)
+
+#undef YUV444_FUNC
+
+const WebPYUV444Converter WebPYUV444Converters[MODE_LAST] = {
+ Yuv444ToRgb, // MODE_RGB
+ Yuv444ToRgba, // MODE_RGBA
+ Yuv444ToBgr, // MODE_BGR
+ Yuv444ToBgra, // MODE_BGRA
+ Yuv444ToArgb, // MODE_ARGB
+ Yuv444ToRgba4444, // MODE_RGBA_4444
+ Yuv444ToRgb565, // MODE_RGB_565
+ Yuv444ToRgba, // MODE_rgbA
+ Yuv444ToBgra, // MODE_bgrA
+ Yuv444ToArgb, // MODE_Argb
+ Yuv444ToRgba4444 // MODE_rgbA_4444
+};
+
+//------------------------------------------------------------------------------
+// Premultiplied modes
+
+// non dithered-modes
+
+// (x * a * 32897) >> 23 is bit-wise equivalent to (int)(x * a / 255.)
+// for all 8-bit x and a. For bit-wise equivalence to (int)(x * a / 255. + .5),
+// one can use instead: (x * a * 65793 + (1 << 23)) >> 24
+#if 1 // (int)(x * a / 255.)
+#define MULTIPLIER(a) ((a) * 32897UL)
+#define PREMULTIPLY(x, m) (((x) * (m)) >> 23)
+#else // (int)(x * a / 255. + .5)
+#define MULTIPLIER(a) ((a) * 65793UL)
+#define PREMULTIPLY(x, m) (((x) * (m) + (1UL << 23)) >> 24)
+#endif
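+// Illustrative check of the claim above (not part of the original source):
+//   int x, a;
+//   for (a = 0; a < 256; ++a)
+//     for (x = 0; x < 256; ++x)
+//       assert(((x * a * 32897UL) >> 23) == (uint32_t)((x * a) / 255));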
+
+static void ApplyAlphaMultiply(uint8_t* rgba, int alpha_first,
+ int w, int h, int stride) {
+ while (h-- > 0) {
+ uint8_t* const rgb = rgba + (alpha_first ? 1 : 0);
+ const uint8_t* const alpha = rgba + (alpha_first ? 0 : 3);
+ int i;
+ for (i = 0; i < w; ++i) {
+ const uint32_t a = alpha[4 * i];
+ if (a != 0xff) {
+ const uint32_t mult = MULTIPLIER(a);
+ rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult);
+ rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult);
+ rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult);
+ }
+ }
+ rgba += stride;
+ }
+}
+#undef MULTIPLIER
+#undef PREMULTIPLY
+
+// rgbA4444
+
+#define MULTIPLIER(a) ((a) * 0x1111) // 0x1111 ~= (1 << 16) / 15
+
+static WEBP_INLINE uint8_t dither_hi(uint8_t x) {
+ return (x & 0xf0) | (x >> 4);
+}
+
+static WEBP_INLINE uint8_t dither_lo(uint8_t x) {
+ return (x & 0x0f) | (x << 4);
+}
+
+static WEBP_INLINE uint8_t multiply(uint8_t x, uint32_t m) {
+ return (x * m) >> 16;
+}
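+
+// e.g. (illustrative): dither_hi(0xAB) == 0xAA and dither_lo(0xAB) == 0xBB:
+// each 4-bit channel is expanded to 8 bits by replicating its nibble before
+// being scaled back via multiply().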
+
+static void ApplyAlphaMultiply4444(uint8_t* rgba4444,
+ int w, int h, int stride) {
+ while (h-- > 0) {
+ int i;
+ for (i = 0; i < w; ++i) {
+ const uint8_t a = (rgba4444[2 * i + 1] & 0x0f);
+ const uint32_t mult = MULTIPLIER(a);
+ const uint8_t r = multiply(dither_hi(rgba4444[2 * i + 0]), mult);
+ const uint8_t g = multiply(dither_lo(rgba4444[2 * i + 0]), mult);
+ const uint8_t b = multiply(dither_hi(rgba4444[2 * i + 1]), mult);
+ rgba4444[2 * i + 0] = (r & 0xf0) | ((g >> 4) & 0x0f);
+ rgba4444[2 * i + 1] = (b & 0xf0) | a;
+ }
+ rgba4444 += stride;
+ }
+}
+#undef MULTIPLIER
+
+void (*WebPApplyAlphaMultiply)(uint8_t*, int, int, int, int)
+ = ApplyAlphaMultiply;
+void (*WebPApplyAlphaMultiply4444)(uint8_t*, int, int, int)
+ = ApplyAlphaMultiply4444;
+
+//------------------------------------------------------------------------------
+// Main call
+
+void WebPInitUpsamplers(void) {
+#ifdef FANCY_UPSAMPLING
+ WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair;
+ WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair;
+ WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair;
+ WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair;
+ WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair;
+ WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair;
+ WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair;
+
+ // If runtime cpu-detection is available, use it to overwrite some
+ // pointers with faster versions.
+ if (VP8GetCPUInfo != NULL) {
+#if defined(WEBP_USE_SSE2)
+ if (VP8GetCPUInfo(kSSE2)) {
+ WebPInitUpsamplersSSE2();
+ }
+#endif
+#if defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ WebPInitUpsamplersNEON();
+ }
+#endif
+ }
+#endif // FANCY_UPSAMPLING
+}
+
+void WebPInitPremultiply(void) {
+ WebPApplyAlphaMultiply = ApplyAlphaMultiply;
+ WebPApplyAlphaMultiply4444 = ApplyAlphaMultiply4444;
+
+#ifdef FANCY_UPSAMPLING
+ WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair;
+ WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair;
+ WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair;
+ WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair;
+
+ if (VP8GetCPUInfo != NULL) {
+#if defined(WEBP_USE_SSE2)
+ if (VP8GetCPUInfo(kSSE2)) {
+ WebPInitPremultiplySSE2();
+ }
+#endif
+#if defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ WebPInitPremultiplyNEON();
+ }
+#endif
+ }
+#endif // FANCY_UPSAMPLING
+}
+
diff --git a/drivers/webp/dsp/upsampling_neon.c b/drivers/webp/dsp/upsampling_neon.c
new file mode 100644
index 000000000..791222f81
--- /dev/null
+++ b/drivers/webp/dsp/upsampling_neon.c
@@ -0,0 +1,265 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// NEON version of YUV to RGB upsampling functions.
+//
+// Author: mans@mansr.com (Mans Rullgard)
+// Based on SSE code by: somnath@google.com (Somnath Banerjee)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+#include <arm_neon.h>
+#include <string.h>
+#include "./yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+//-----------------------------------------------------------------------------
+// U/V upsampling
+
+// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
+#define UPSAMPLE_16PIXELS(r1, r2, out) { \
+ uint8x8_t a = vld1_u8(r1); \
+ uint8x8_t b = vld1_u8(r1 + 1); \
+ uint8x8_t c = vld1_u8(r2); \
+ uint8x8_t d = vld1_u8(r2 + 1); \
+ \
+ uint16x8_t al = vshll_n_u8(a, 1); \
+ uint16x8_t bl = vshll_n_u8(b, 1); \
+ uint16x8_t cl = vshll_n_u8(c, 1); \
+ uint16x8_t dl = vshll_n_u8(d, 1); \
+ \
+ uint8x8_t diag1, diag2; \
+ uint16x8_t sl; \
+ \
+ /* a + b + c + d */ \
+ sl = vaddl_u8(a, b); \
+ sl = vaddw_u8(sl, c); \
+ sl = vaddw_u8(sl, d); \
+ \
+ al = vaddq_u16(sl, al); /* 3a + b + c + d */ \
+ bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \
+ \
+ al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \
+ bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \
+ \
+ diag2 = vshrn_n_u16(al, 3); /* (3a + b + c + 3d) / 8 */ \
+ diag1 = vshrn_n_u16(bl, 3); /* (a + 3b + 3c + d) / 8 */ \
+ \
+ a = vrhadd_u8(a, diag1); /* a' = (9a + 3b + 3c + d + 8) / 16 */ \
+ b = vrhadd_u8(b, diag2); /* b' = (3a + 9b + c + 3d + 8) / 16 */ \
+ c = vrhadd_u8(c, diag2); /* c' = (3a + b + 9c + 3d + 8) / 16 */ \
+ d = vrhadd_u8(d, diag1); /* d' = (a + 3b + 3c + 9d + 8) / 16 */ \
+ \
+ { \
+ const uint8x8x2_t a_b = {{ a, b }}; \
+ const uint8x8x2_t c_d = {{ c, d }}; \
+ vst2_u8(out, a_b); \
+ vst2_u8(out + 32, c_d); \
+ } \
+}
+
+// Turn the macro into a function, to reduce code size where speed is
+// non-critical.
+static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
+ uint8_t *out) {
+ UPSAMPLE_16PIXELS(r1, r2, out);
+}
+
+#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
+ uint8_t r1[9], r2[9]; \
+ memcpy(r1, (tb), (num_pixels)); \
+ memcpy(r2, (bb), (num_pixels)); \
+ /* replicate last byte */ \
+ memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
+ memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
+ Upsample16Pixels(r1, r2, out); \
+}
+
+//-----------------------------------------------------------------------------
+// YUV->RGB conversion
+
+static const int16_t kCoeffs[4] = { kYScale, kVToR, kUToG, kVToG };
+
+#define v255 vmov_n_u8(255)
+
+#define STORE_Rgb(out, r, g, b) do { \
+ const uint8x8x3_t r_g_b = {{ r, g, b }}; \
+ vst3_u8(out, r_g_b); \
+} while (0)
+
+#define STORE_Bgr(out, r, g, b) do { \
+ const uint8x8x3_t b_g_r = {{ b, g, r }}; \
+ vst3_u8(out, b_g_r); \
+} while (0)
+
+#define STORE_Rgba(out, r, g, b) do { \
+ const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }}; \
+ vst4_u8(out, r_g_b_v255); \
+} while (0)
+
+#define STORE_Bgra(out, r, g, b) do { \
+ const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }}; \
+ vst4_u8(out, b_g_r_v255); \
+} while (0)
+
+#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
+ int i; \
+ for (i = 0; i < N; i += 8) { \
+ const int off = ((cur_x) + i) * XSTEP; \
+ uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \
+ uint8x8_t u = vld1_u8((src_uv) + i); \
+ uint8x8_t v = vld1_u8((src_uv) + i + 16); \
+ const int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
+ const int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
+ const int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
+ int32x4_t yl = vmull_lane_s16(vget_low_s16(yy), cf16, 0); \
+ int32x4_t yh = vmull_lane_s16(vget_high_s16(yy), cf16, 0); \
+ const int32x4_t rl = vmlal_lane_s16(yl, vget_low_s16(vv), cf16, 1);\
+ const int32x4_t rh = vmlal_lane_s16(yh, vget_high_s16(vv), cf16, 1);\
+ int32x4_t gl = vmlsl_lane_s16(yl, vget_low_s16(uu), cf16, 2); \
+ int32x4_t gh = vmlsl_lane_s16(yh, vget_high_s16(uu), cf16, 2); \
+ const int32x4_t bl = vmovl_s16(vget_low_s16(uu)); \
+ const int32x4_t bh = vmovl_s16(vget_high_s16(uu)); \
+ gl = vmlsl_lane_s16(gl, vget_low_s16(vv), cf16, 3); \
+ gh = vmlsl_lane_s16(gh, vget_high_s16(vv), cf16, 3); \
+ yl = vmlaq_lane_s32(yl, bl, cf32, 0); \
+ yh = vmlaq_lane_s32(yh, bh, cf32, 0); \
+ /* vrshrn_n_s32() already incorporates the rounding constant */ \
+ /* (note: the y/u/v registers are reused below to hold the r/g/b results) */ \
+ y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, YUV_FIX2), \
+ vrshrn_n_s32(rh, YUV_FIX2))); \
+ u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, YUV_FIX2), \
+ vrshrn_n_s32(gh, YUV_FIX2))); \
+ v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(yl, YUV_FIX2), \
+ vrshrn_n_s32(yh, YUV_FIX2))); \
+ STORE_ ## FMT(out + off, y, u, v); \
+ } \
+}
+
+#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
+ int i; \
+ for (i = 0; i < N; i++) { \
+ const int off = ((cur_x) + i) * XSTEP; \
+ const int y = src_y[(cur_x) + i]; \
+ const int u = (src_uv)[i]; \
+ const int v = (src_uv)[i + 16]; \
+ FUNC(y, u, v, rgb + off); \
+ } \
+}
+
+#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
+ top_dst, bottom_dst, cur_x, len) { \
+ CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
+ if (bottom_y != NULL) { \
+ CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
+ } \
+}
+
+#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \
+ top_dst, bottom_dst, cur_x, len) { \
+ CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \
+ if (bottom_y != NULL) { \
+ CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
+ } \
+}
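+
+// Illustrative summary (not in the original source) of the r_uv scratch
+// layout consumed above: top-row u at [0..15], top-row v at [16..31],
+// bottom-row u at [32..47] and bottom-row v at [48..63], as produced by the
+// two UPSAMPLE_16PIXELS() calls in the upsampler below. Hence the
+// 'src_uv + i + 16' read for v and the '(uv) + 32' offset for the bottom row.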
+
+#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
+static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
+ const uint8_t *top_u, const uint8_t *top_v, \
+ const uint8_t *cur_u, const uint8_t *cur_v, \
+ uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
+ int block; \
+ /* 16 byte aligned array to cache reconstructed u and v */ \
+ uint8_t uv_buf[2 * 32 + 15]; \
+ uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
+ const int uv_len = (len + 1) >> 1; \
+ /* 9 pixels must be readable for each block */ \
+ const int num_blocks = (uv_len - 1) >> 3; \
+ const int leftover = uv_len - num_blocks * 8; \
+ const int last_pos = 1 + 16 * num_blocks; \
+ \
+ const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
+ const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
+ \
+ const int16x4_t cf16 = vld1_s16(kCoeffs); \
+ const int32x2_t cf32 = vmov_n_s32(kUToB); \
+ const uint8x8_t u16 = vmov_n_u8(16); \
+ const uint8x8_t u128 = vmov_n_u8(128); \
+ \
+ /* Treat the first pixel in the regular way */ \
+ assert(top_y != NULL); \
+ { \
+ const int u0 = (top_u[0] + u_diag) >> 1; \
+ const int v0 = (top_v[0] + v_diag) >> 1; \
+ VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
+ } \
+ if (bottom_y != NULL) { \
+ const int u0 = (cur_u[0] + u_diag) >> 1; \
+ const int v0 = (cur_v[0] + v_diag) >> 1; \
+ VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
+ } \
+ \
+ for (block = 0; block < num_blocks; ++block) { \
+ UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
+ UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
+ CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
+ top_dst, bottom_dst, 16 * block + 1, 16); \
+ top_u += 8; \
+ cur_u += 8; \
+ top_v += 8; \
+ cur_v += 8; \
+ } \
+ \
+ UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
+ UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
+ CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \
+ top_dst, bottom_dst, last_pos, len - last_pos); \
+}
+
+// NEON variants of the fancy upsampler.
+NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON, Rgb, 3)
+NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON, Bgr, 3)
+NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4)
+NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)
+
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+
+#ifdef FANCY_UPSAMPLING
+
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+void WebPInitUpsamplersNEON(void) {
+#if defined(WEBP_USE_NEON)
+ WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairNEON;
+ WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON;
+ WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairNEON;
+ WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON;
+#endif // WEBP_USE_NEON
+}
+
+void WebPInitPremultiplyNEON(void) {
+#if defined(WEBP_USE_NEON)
+ WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON;
+ WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON;
+#endif // WEBP_USE_NEON
+}
+
+#else
+
+// this empty function is to avoid an empty .o
+void WebPInitPremultiplyNEON(void) {}
+
+#endif // FANCY_UPSAMPLING
+
diff --git a/drivers/webp/dsp/upsampling_sse2.c b/drivers/webp/dsp/upsampling_sse2.c
new file mode 100644
index 000000000..0db0798c6
--- /dev/null
+++ b/drivers/webp/dsp/upsampling_sse2.c
@@ -0,0 +1,218 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 version of YUV to RGB upsampling functions.
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include <assert.h>
+#include <emmintrin.h>
+#include <string.h>
+#include "./yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
+// u = (9*a + 3*b + 3*c + d + 8) / 16
+// = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
+// = (a + m + 1) / 2
+// where m = (a + 3*b + 3*c + d) / 8
+// = ((a + b + c + d) / 2 + b + c) / 4
+//
+// Let's say k = (a + b + c + d) / 4.
+// We can compute k as
+// k = (s + t + 1) / 2 - (((a^d) | (b^c) | (s^t)) & 1)
+// where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
+//
+// Then m can be written as
+// m = (k + t + 1) / 2 - ((((b^c) & (s^t)) | (k^t)) & 1)
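+//
+// Illustrative scalar equivalent per byte lane (not part of the original
+// source; exact up to intermediate truncation):
+//   s = (a + d + 1) >> 1;  t = (b + c + 1) >> 1;
+//   k = ((s + t + 1) >> 1) - (((a ^ d) | (b ^ c) | (s ^ t)) & 1);
+//   m = ((k + t + 1) >> 1) - ((((b ^ c) & (s ^ t)) | (k ^ t)) & 1);
+//   u = (a + m + 1) >> 1;            // == (9a + 3b + 3c + d + 8) / 16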
+
+// Computes out = (k + in + 1) / 2 - ((((ij) & (s^t)) | (k^in)) & 1)
+#define GET_M(ij, in, out) do { \
+ const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \
+ const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \
+ const __m128i tmp2 = _mm_xor_si128(k, (in)); /* (k^in) */ \
+ const __m128i tmp3 = _mm_or_si128(tmp1, tmp2); /* ((ij) & (s^t)) | (k^in) */\
+ const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \
+ (out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \
+} while (0)
+
+// pack and store two alternating pixel rows
+#define PACK_AND_STORE(a, b, da, db, out) do { \
+ const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
+ const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
+ const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
+ const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
+ _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
+ _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
+} while (0)
+
+// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
+#define UPSAMPLE_32PIXELS(r1, r2, out) { \
+ const __m128i one = _mm_set1_epi8(1); \
+ const __m128i a = _mm_loadu_si128((__m128i*)&(r1)[0]); \
+ const __m128i b = _mm_loadu_si128((__m128i*)&(r1)[1]); \
+ const __m128i c = _mm_loadu_si128((__m128i*)&(r2)[0]); \
+ const __m128i d = _mm_loadu_si128((__m128i*)&(r2)[1]); \
+ \
+ const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \
+ const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \
+ const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \
+ \
+ const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \
+ const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \
+ \
+ const __m128i t1 = _mm_or_si128(ad, bc); /* (a^d) | (b^c) */ \
+ const __m128i t2 = _mm_or_si128(t1, st); /* (a^d) | (b^c) | (s^t) */ \
+ const __m128i t3 = _mm_and_si128(t2, one); /* ((a^d)|(b^c)|(s^t)) & 1 */ \
+ const __m128i t4 = _mm_avg_epu8(s, t); /* (s + t + 1) / 2 */ \
+ const __m128i k = _mm_sub_epi8(t4, t3); /* k = (a + b + c + d) / 4 */ \
+ __m128i diag1, diag2; \
+ \
+ GET_M(bc, t, diag1); /* diag1 = (a + 3b + 3c + d) / 8 */ \
+ GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \
+ \
+ /* pack the alternate pixels */ \
+ PACK_AND_STORE(a, b, diag1, diag2, out + 0); /* store top */ \
+ PACK_AND_STORE(c, d, diag2, diag1, out + 2 * 32); /* store bottom */ \
+}
+
+// Turn the macro into a function, to reduce code size where speed is
+// non-critical.
+static void Upsample32Pixels(const uint8_t r1[], const uint8_t r2[],
+ uint8_t* const out) {
+ UPSAMPLE_32PIXELS(r1, r2, out);
+}
+
+#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
+ uint8_t r1[17], r2[17]; \
+ memcpy(r1, (tb), (num_pixels)); \
+ memcpy(r2, (bb), (num_pixels)); \
+ /* replicate last byte */ \
+ memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \
+ memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \
+ /* using the shared function instead of the macro saves ~3k code size */ \
+ Upsample32Pixels(r1, r2, out); \
+}
+
+#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, \
+ top_dst, bottom_dst, cur_x, num_pixels) { \
+ int n; \
+ for (n = 0; n < (num_pixels); ++n) { \
+ FUNC(top_y[(cur_x) + n], r_u[n], r_v[n], \
+ top_dst + ((cur_x) + n) * XSTEP); \
+ } \
+ if (bottom_y != NULL) { \
+ for (n = 0; n < (num_pixels); ++n) { \
+ FUNC(bottom_y[(cur_x) + n], r_u[64 + n], r_v[64 + n], \
+ bottom_dst + ((cur_x) + n) * XSTEP); \
+ } \
+ } \
+}
+
+#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \
+ top_dst, bottom_dst, cur_x) do { \
+ FUNC##32(top_y + (cur_x), r_u, r_v, top_dst + (cur_x) * XSTEP); \
+ if (bottom_y != NULL) { \
+ FUNC##32(bottom_y + (cur_x), r_u + 64, r_v + 64, \
+ bottom_dst + (cur_x) * XSTEP); \
+ } \
+} while (0)
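+
+// Illustrative note (not in the original source): UPSAMPLE_32PIXELS stores
+// the top row at out[0..31] and the bottom row at out + 64 (2 * 32), which
+// is why the bottom row is read from r_u + 64 / r_v + 64 in the two macros
+// above.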
+
+#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
+static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
+ const uint8_t* top_u, const uint8_t* top_v, \
+ const uint8_t* cur_u, const uint8_t* cur_v, \
+ uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
+ int uv_pos, pos; \
+ /* 16byte-aligned array to cache reconstructed u and v */ \
+ uint8_t uv_buf[4 * 32 + 15]; \
+ uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
+ uint8_t* const r_v = r_u + 32; \
+ \
+ assert(top_y != NULL); \
+ { /* Treat the first pixel in the regular way */ \
+ const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
+ const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
+ const int u0_t = (top_u[0] + u_diag) >> 1; \
+ const int v0_t = (top_v[0] + v_diag) >> 1; \
+ FUNC(top_y[0], u0_t, v0_t, top_dst); \
+ if (bottom_y != NULL) { \
+ const int u0_b = (cur_u[0] + u_diag) >> 1; \
+ const int v0_b = (cur_v[0] + v_diag) >> 1; \
+ FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \
+ } \
+ } \
+ /* For UPSAMPLE_32PIXELS, 17 u/v values must be readable for each block */ \
+ for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \
+ UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \
+ UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \
+ CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \
+ } \
+ if (len > 1) { \
+ const int left_over = ((len + 1) >> 1) - (pos >> 1); \
+ assert(left_over > 0); \
+ UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \
+ UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \
+ CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, \
+ pos, len - pos); \
+ } \
+}
+
+// SSE2 variants of the fancy upsampler.
+SSE2_UPSAMPLE_FUNC(UpsampleRgbLinePairSSE2, VP8YuvToRgb, 3)
+SSE2_UPSAMPLE_FUNC(UpsampleBgrLinePairSSE2, VP8YuvToBgr, 3)
+SSE2_UPSAMPLE_FUNC(UpsampleRgbaLinePairSSE2, VP8YuvToRgba, 4)
+SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
+
+#undef GET_M
+#undef PACK_AND_STORE
+#undef UPSAMPLE_32PIXELS
+#undef UPSAMPLE_LAST_BLOCK
+#undef CONVERT2RGB
+#undef CONVERT2RGB_32
+#undef SSE2_UPSAMPLE_FUNC
+
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+
+#ifdef FANCY_UPSAMPLING
+
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+void WebPInitUpsamplersSSE2(void) {
+#if defined(WEBP_USE_SSE2)
+ VP8YUVInitSSE2();
+ WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2;
+ WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2;
+ WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2;
+ WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairSSE2;
+#endif // WEBP_USE_SSE2
+}
+
+void WebPInitPremultiplySSE2(void) {
+#if defined(WEBP_USE_SSE2)
+ WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairSSE2;
+ WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairSSE2;
+#endif // WEBP_USE_SSE2
+}
+
+#else
+
+// this empty function is to avoid an empty .o
+void WebPInitPremultiplySSE2(void) {}
+
+#endif // FANCY_UPSAMPLING
+
diff --git a/drivers/webp/dsp/yuv.c b/drivers/webp/dsp/yuv.c
new file mode 100644
index 000000000..4f9cafc10
--- /dev/null
+++ b/drivers/webp/dsp/yuv.c
@@ -0,0 +1,207 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// YUV->RGB conversion function
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "./yuv.h"
+
+
+#if defined(WEBP_YUV_USE_TABLE)
+
+static int done = 0;
+
+static WEBP_INLINE uint8_t clip(int v, int max_value) {
+ return v < 0 ? 0 : v > max_value ? max_value : v;
+}
+
+int16_t VP8kVToR[256], VP8kUToB[256];
+int32_t VP8kVToG[256], VP8kUToG[256];
+uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
+uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
+
+void VP8YUVInit(void) {
+ int i;
+ if (done) {
+ return;
+ }
+#ifndef USE_YUVj
+ for (i = 0; i < 256; ++i) {
+ VP8kVToR[i] = (89858 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ VP8kUToG[i] = -22014 * (i - 128) + YUV_HALF;
+ VP8kVToG[i] = -45773 * (i - 128);
+ VP8kUToB[i] = (113618 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ }
+ for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) {
+ const int k = ((i - 16) * 76283 + YUV_HALF) >> YUV_FIX;
+ VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
+ VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
+ }
+#else
+ for (i = 0; i < 256; ++i) {
+ VP8kVToR[i] = (91881 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ VP8kUToG[i] = -22554 * (i - 128) + YUV_HALF;
+ VP8kVToG[i] = -46802 * (i - 128);
+ VP8kUToB[i] = (116130 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ }
+ for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) {
+ const int k = i;
+ VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
+ VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
+ }
+#endif
+
+ done = 1;
+}
+
+#else
+
+void VP8YUVInit(void) {}
+
+#endif // WEBP_YUV_USE_TABLE
+
+//-----------------------------------------------------------------------------
+// SSE2 extras
+
+#if defined(WEBP_USE_SSE2)
+
+#ifdef FANCY_UPSAMPLING
+
+#include <emmintrin.h>
+#include <string.h> // for memcpy
+
+typedef union { // handy union for setting up the SSE2 constant tables
+ int32_t i32[4];
+ uint8_t u8[16];
+ __m128i m;
+} VP8kCstSSE2;
+
+static int done_sse2 = 0;
+static VP8kCstSSE2 VP8kUtoRGBA[256], VP8kVtoRGBA[256], VP8kYtoRGBA[256];
+
+void VP8YUVInitSSE2(void) {
+ if (!done_sse2) {
+ int i;
+ for (i = 0; i < 256; ++i) {
+ VP8kYtoRGBA[i].i32[0] =
+ VP8kYtoRGBA[i].i32[1] =
+ VP8kYtoRGBA[i].i32[2] = (i - 16) * kYScale + YUV_HALF2;
+ VP8kYtoRGBA[i].i32[3] = 0xff << YUV_FIX2;
+
+ VP8kUtoRGBA[i].i32[0] = 0;
+ VP8kUtoRGBA[i].i32[1] = -kUToG * (i - 128);
+ VP8kUtoRGBA[i].i32[2] = kUToB * (i - 128);
+ VP8kUtoRGBA[i].i32[3] = 0;
+
+ VP8kVtoRGBA[i].i32[0] = kVToR * (i - 128);
+ VP8kVtoRGBA[i].i32[1] = -kVToG * (i - 128);
+ VP8kVtoRGBA[i].i32[2] = 0;
+ VP8kVtoRGBA[i].i32[3] = 0;
+ }
+ done_sse2 = 1;
+ }
+}
+
+static WEBP_INLINE __m128i VP8GetRGBA32b(int y, int u, int v) {
+ const __m128i u_part = _mm_loadu_si128(&VP8kUtoRGBA[u].m);
+ const __m128i v_part = _mm_loadu_si128(&VP8kVtoRGBA[v].m);
+ const __m128i y_part = _mm_loadu_si128(&VP8kYtoRGBA[y].m);
+ const __m128i uv_part = _mm_add_epi32(u_part, v_part);
+ const __m128i rgba1 = _mm_add_epi32(y_part, uv_part);
+ const __m128i rgba2 = _mm_srai_epi32(rgba1, YUV_FIX2);
+ return rgba2;
+}
+
+static WEBP_INLINE void VP8YuvToRgbSSE2(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const rgb) {
+ const __m128i tmp0 = VP8GetRGBA32b(y, u, v);
+ const __m128i tmp1 = _mm_packs_epi32(tmp0, tmp0);
+ const __m128i tmp2 = _mm_packus_epi16(tmp1, tmp1);
+ // Note: we store 8 bytes at a time, not 3 bytes! -> memory stomp
+ _mm_storel_epi64((__m128i*)rgb, tmp2);
+}
+
+static WEBP_INLINE void VP8YuvToBgrSSE2(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const bgr) {
+ const __m128i tmp0 = VP8GetRGBA32b(y, u, v);
+ const __m128i tmp1 = _mm_shuffle_epi32(tmp0, _MM_SHUFFLE(3, 0, 1, 2));
+ const __m128i tmp2 = _mm_packs_epi32(tmp1, tmp1);
+ const __m128i tmp3 = _mm_packus_epi16(tmp2, tmp2);
+ // Note: we store 8 bytes at a time, not 3 bytes! -> memory stomp
+ _mm_storel_epi64((__m128i*)bgr, tmp3);
+}
+
+void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst) {
+ int n;
+ for (n = 0; n < 32; n += 4) {
+ const __m128i tmp0_1 = VP8GetRGBA32b(y[n + 0], u[n + 0], v[n + 0]);
+ const __m128i tmp0_2 = VP8GetRGBA32b(y[n + 1], u[n + 1], v[n + 1]);
+ const __m128i tmp0_3 = VP8GetRGBA32b(y[n + 2], u[n + 2], v[n + 2]);
+ const __m128i tmp0_4 = VP8GetRGBA32b(y[n + 3], u[n + 3], v[n + 3]);
+ const __m128i tmp1_1 = _mm_packs_epi32(tmp0_1, tmp0_2);
+ const __m128i tmp1_2 = _mm_packs_epi32(tmp0_3, tmp0_4);
+ const __m128i tmp2 = _mm_packus_epi16(tmp1_1, tmp1_2);
+ _mm_storeu_si128((__m128i*)dst, tmp2);
+ dst += 4 * 4;
+ }
+}
+
+void VP8YuvToBgra32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst) {
+ int n;
+ for (n = 0; n < 32; n += 2) {
+ const __m128i tmp0_1 = VP8GetRGBA32b(y[n + 0], u[n + 0], v[n + 0]);
+ const __m128i tmp0_2 = VP8GetRGBA32b(y[n + 1], u[n + 1], v[n + 1]);
+ const __m128i tmp1_1 = _mm_shuffle_epi32(tmp0_1, _MM_SHUFFLE(3, 0, 1, 2));
+ const __m128i tmp1_2 = _mm_shuffle_epi32(tmp0_2, _MM_SHUFFLE(3, 0, 1, 2));
+ const __m128i tmp2_1 = _mm_packs_epi32(tmp1_1, tmp1_2);
+ const __m128i tmp3 = _mm_packus_epi16(tmp2_1, tmp2_1);
+ _mm_storel_epi64((__m128i*)dst, tmp3);
+ dst += 4 * 2;
+ }
+}
+
+void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst) {
+ int n;
+ uint8_t tmp0[2 * 3 + 5 + 15];
+ uint8_t* const tmp = (uint8_t*)((uintptr_t)(tmp0 + 15) & ~15); // align
+ for (n = 0; n < 30; ++n) { // we directly stomp the *dst memory
+ VP8YuvToRgbSSE2(y[n], u[n], v[n], dst + n * 3);
+ }
+ // Last two pixels are special: we write in a tmp buffer before sending
+ // to dst.
+ VP8YuvToRgbSSE2(y[n + 0], u[n + 0], v[n + 0], tmp + 0);
+ VP8YuvToRgbSSE2(y[n + 1], u[n + 1], v[n + 1], tmp + 3);
+ memcpy(dst + n * 3, tmp, 2 * 3);
+}
+
+void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst) {
+ int n;
+ uint8_t tmp0[2 * 3 + 5 + 15];
+ uint8_t* const tmp = (uint8_t*)((uintptr_t)(tmp0 + 15) & ~15); // align
+ for (n = 0; n < 30; ++n) {
+ VP8YuvToBgrSSE2(y[n], u[n], v[n], dst + n * 3);
+ }
+ VP8YuvToBgrSSE2(y[n + 0], u[n + 0], v[n + 0], tmp + 0);
+ VP8YuvToBgrSSE2(y[n + 1], u[n + 1], v[n + 1], tmp + 3);
+ memcpy(dst + n * 3, tmp, 2 * 3);
+}
+
+#else
+
+void VP8YUVInitSSE2(void) {}
+
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_SSE2
+
diff --git a/drivers/webp/dsp/yuv.h b/drivers/webp/dsp/yuv.h
new file mode 100644
index 000000000..dd778f9cb
--- /dev/null
+++ b/drivers/webp/dsp/yuv.h
@@ -0,0 +1,317 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// inline YUV<->RGB conversion function
+//
+// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
+// More information at: http://en.wikipedia.org/wiki/YCbCr
+// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
+// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
+// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
+// We use 16bit fixed point operations for RGB->YUV conversion (YUV_FIX).
+//
+// For the Y'CbCr to RGB conversion, the BT.601 specification reads:
+// R = 1.164 * (Y-16) + 1.596 * (V-128)
+// G = 1.164 * (Y-16) - 0.813 * (V-128) - 0.391 * (U-128)
+// B = 1.164 * (Y-16) + 2.018 * (U-128)
+// where Y is in the [16,235] range, and U/V in the [16,240] range.
+// In the table-lookup version (WEBP_YUV_USE_TABLE), the common factor
+// "1.164 * (Y-16)" can be handled as an offset in the VP8kClip[] table.
+// So in this case the formulae should read:
+// R = 1.164 * [Y + 1.371 * (V-128) ] - 18.624
+// G = 1.164 * [Y - 0.698 * (V-128) - 0.336 * (U-128)] - 18.624
+// B = 1.164 * [Y + 1.733 * (U-128)] - 18.624
+// once factorized.
+// For YUV->RGB conversion, only 14bit fixed precision is used (YUV_FIX2).
+// That's the maximum possible for a convenient ARM implementation.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_DSP_YUV_H_
+#define WEBP_DSP_YUV_H_
+
+#include "./dsp.h"
+#include "../dec/decode_vp8.h"
+
+// Define the following to use the LUT-based code:
+// #define WEBP_YUV_USE_TABLE
+
+#if defined(WEBP_EXPERIMENTAL_FEATURES)
+// Do NOT activate this feature for real compression. This is only experimental!
+// This flag is for comparison purpose against JPEG's "YUVj" natural colorspace.
+// This colorspace is close to Rec.601's Y'CbCr model with the notable
+// difference of allowing larger range for luma/chroma.
+// See http://en.wikipedia.org/wiki/YCbCr#JPEG_conversion paragraph, and its
+// difference with http://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
+// #define USE_YUVj
+#endif
+
+//------------------------------------------------------------------------------
+// YUV -> RGB conversion
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ YUV_FIX = 16, // fixed-point precision for RGB->YUV
+ YUV_HALF = 1 << (YUV_FIX - 1),
+ YUV_MASK = (256 << YUV_FIX) - 1,
+ YUV_RANGE_MIN = -227, // min value of r/g/b output
+ YUV_RANGE_MAX = 256 + 226, // max value of r/g/b output
+
+ YUV_FIX2 = 14, // fixed-point precision for YUV->RGB
+ YUV_HALF2 = 1 << (YUV_FIX2 - 1),
+ YUV_MASK2 = (256 << YUV_FIX2) - 1
+};
+
+// These constants are 14b fixed-point version of ITU-R BT.601 constants.
+#define kYScale 19077 // 1.164 = 255 / 219
+#define kVToR 26149 // 1.596 = 255 / 112 * 0.701
+#define kUToG 6419 // 0.391 = 255 / 112 * 0.886 * 0.114 / 0.587
+#define kVToG 13320 // 0.813 = 255 / 112 * 0.701 * 0.299 / 0.587
+#define kUToB 33050 // 2.018 = 255 / 112 * 0.886
+#define kRCst (-kYScale * 16 - kVToR * 128 + YUV_HALF2)
+#define kGCst (-kYScale * 16 + kUToG * 128 + kVToG * 128 + YUV_HALF2)
+#define kBCst (-kYScale * 16 - kUToB * 128 + YUV_HALF2)
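+
+// For example (illustrative): with these constants, the red channel in the
+// non-table version below is computed as
+//   R = (kYScale * y + kVToR * v + kRCst) >> YUV_FIX2
+// i.e. the 14-bit fixed-point form of 1.164 * (Y - 16) + 1.596 * (V - 128).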
+
+//------------------------------------------------------------------------------
+
+#if !defined(WEBP_YUV_USE_TABLE)
+
+// slower on x86 by ~7-8%, but bit-exact with the SSE2 version
+
+static WEBP_INLINE int VP8Clip8(int v) {
+ return ((v & ~YUV_MASK2) == 0) ? (v >> YUV_FIX2) : (v < 0) ? 0 : 255;
+}
+
+static WEBP_INLINE int VP8YUVToR(int y, int v) {
+ return VP8Clip8(kYScale * y + kVToR * v + kRCst);
+}
+
+static WEBP_INLINE int VP8YUVToG(int y, int u, int v) {
+ return VP8Clip8(kYScale * y - kUToG * u - kVToG * v + kGCst);
+}
+
+static WEBP_INLINE int VP8YUVToB(int y, int u) {
+ return VP8Clip8(kYScale * y + kUToB * u + kBCst);
+}
+
+static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
+ uint8_t* const rgb) {
+ rgb[0] = VP8YUVToR(y, v);
+ rgb[1] = VP8YUVToG(y, u, v);
+ rgb[2] = VP8YUVToB(y, u);
+}
+
+static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v,
+ uint8_t* const bgr) {
+ bgr[0] = VP8YUVToB(y, u);
+ bgr[1] = VP8YUVToG(y, u, v);
+ bgr[2] = VP8YUVToR(y, v);
+}
+
+static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v,
+ uint8_t* const rgb) {
+ const int r = VP8YUVToR(y, v); // 5 usable bits
+ const int g = VP8YUVToG(y, u, v); // 6 usable bits
+ const int b = VP8YUVToB(y, u); // 5 usable bits
+ const int rg = (r & 0xf8) | (g >> 5);
+ const int gb = ((g << 3) & 0xe0) | (b >> 3);
+#ifdef WEBP_SWAP_16BIT_CSP
+ rgb[0] = gb;
+ rgb[1] = rg;
+#else
+ rgb[0] = rg;
+ rgb[1] = gb;
+#endif
+}
+
+static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v,
+ uint8_t* const argb) {
+ const int r = VP8YUVToR(y, v); // 4 usable bits
+ const int g = VP8YUVToG(y, u, v); // 4 usable bits
+ const int b = VP8YUVToB(y, u); // 4 usable bits
+ const int rg = (r & 0xf0) | (g >> 4);
+ const int ba = (b & 0xf0) | 0x0f; // overwrite the lower 4 bits
+#ifdef WEBP_SWAP_16BIT_CSP
+ argb[0] = ba;
+ argb[1] = rg;
+#else
+ argb[0] = rg;
+ argb[1] = ba;
+#endif
+}
+
+#else
+
+// Table-based version, not totally equivalent to the SSE2 version.
+// Rounding diff is only +/-1 though.
+
+extern int16_t VP8kVToR[256], VP8kUToB[256];
+extern int32_t VP8kVToG[256], VP8kUToG[256];
+extern uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
+extern uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
+
+static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
+ uint8_t* const rgb) {
+ const int r_off = VP8kVToR[v];
+ const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
+ const int b_off = VP8kUToB[u];
+ rgb[0] = VP8kClip[y + r_off - YUV_RANGE_MIN];
+ rgb[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
+ rgb[2] = VP8kClip[y + b_off - YUV_RANGE_MIN];
+}
+
+static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v,
+ uint8_t* const bgr) {
+ const int r_off = VP8kVToR[v];
+ const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
+ const int b_off = VP8kUToB[u];
+ bgr[0] = VP8kClip[y + b_off - YUV_RANGE_MIN];
+ bgr[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
+ bgr[2] = VP8kClip[y + r_off - YUV_RANGE_MIN];
+}
+
+static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v,
+ uint8_t* const rgb) {
+ const int r_off = VP8kVToR[v];
+ const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
+ const int b_off = VP8kUToB[u];
+ const int rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
+ (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
+ const int gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
+ (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+#ifdef WEBP_SWAP_16BIT_CSP
+ rgb[0] = gb;
+ rgb[1] = rg;
+#else
+ rgb[0] = rg;
+ rgb[1] = gb;
+#endif
+}
+
+static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v,
+ uint8_t* const argb) {
+ const int r_off = VP8kVToR[v];
+ const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
+ const int b_off = VP8kUToB[u];
+ const int rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
+ VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
+ const int ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
+#ifdef WEBP_SWAP_16BIT_CSP
+ argb[0] = ba;
+ argb[1] = rg;
+#else
+ argb[0] = rg;
+ argb[1] = ba;
+#endif
+}
+
+#endif // WEBP_YUV_USE_TABLE
+
+//-----------------------------------------------------------------------------
+// Alpha handling variants
+
+static WEBP_INLINE void VP8YuvToArgb(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const argb) {
+ argb[0] = 0xff;
+ VP8YuvToRgb(y, u, v, argb + 1);
+}
+
+static WEBP_INLINE void VP8YuvToBgra(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const bgra) {
+ VP8YuvToBgr(y, u, v, bgra);
+ bgra[3] = 0xff;
+}
+
+static WEBP_INLINE void VP8YuvToRgba(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const rgba) {
+ VP8YuvToRgb(y, u, v, rgba);
+ rgba[3] = 0xff;
+}
+
+// Must be called before everything, to initialize the tables.
+void VP8YUVInit(void);
+
+//-----------------------------------------------------------------------------
+// SSE2 extra functions (mostly for upsampling_sse2.c)
+
+#if defined(WEBP_USE_SSE2)
+
+#if defined(FANCY_UPSAMPLING)
+// Process 32 pixels and store the result (24b or 32b per pixel) in *dst.
+void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst);
+void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst);
+void VP8YuvToBgra32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst);
+void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
+ uint8_t* dst);
+#endif // FANCY_UPSAMPLING
+
+// Must be called to initialize tables before using the functions.
+void VP8YUVInitSSE2(void);
+
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// RGB -> YUV conversion
+
+// Stub functions that can be called with various rounding values:
+static WEBP_INLINE int VP8ClipUV(int uv, int rounding) {
+ uv = (uv + rounding + (128 << (YUV_FIX + 2))) >> (YUV_FIX + 2);
+ return ((uv & ~0xff) == 0) ? uv : (uv < 0) ? 0 : 255;
+}
+
+#ifndef USE_YUVj
+
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
+ const int luma = 16839 * r + 33059 * g + 6420 * b;
+ return (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX; // no need to clip
+}
+
+static WEBP_INLINE int VP8RGBToU(int r, int g, int b, int rounding) {
+ const int u = -9719 * r - 19081 * g + 28800 * b;
+ return VP8ClipUV(u, rounding);
+}
+
+static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
+ const int v = +28800 * r - 24116 * g - 4684 * b;
+ return VP8ClipUV(v, rounding);
+}
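+
+// e.g. (illustrative): round-to-nearest conversion of a single RGB pixel.
+// Y is shifted down by YUV_FIX, U/V by (YUV_FIX + 2), hence the different
+// rounding constants:
+//   const int y = VP8RGBToY(r, g, b, YUV_HALF);
+//   const int u = VP8RGBToU(r, g, b, YUV_HALF << 2);
+//   const int v = VP8RGBToV(r, g, b, YUV_HALF << 2);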
+
+#else
+
+// This is the JPEG-YUV colorspace, for comparison only!
+// These are also 16-bit precision coefficients from Rec.601, but with full
+// [0..255] output range.
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
+ const int luma = 19595 * r + 38470 * g + 7471 * b;
+ return (luma + rounding) >> YUV_FIX; // no need to clip
+}
+
+static WEBP_INLINE int VP8RGBToU(int r, int g, int b, int rounding) {
+ const int u = -11058 * r - 21710 * g + 32768 * b;
+ return VP8ClipUV(u, rounding);
+}
+
+static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
+ const int v = 32768 * r - 27439 * g - 5329 * b;
+ return VP8ClipUV(v, rounding);
+}
+
+#endif // USE_YUVj
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* WEBP_DSP_YUV_H_ */