Quantize (64-bit only, for now) SSSE3 SIMD.
author    Ronald S. Bultje <rbultje@google.com>
          Mon, 1 Jul 2013 18:36:07 +0000 (11:36 -0700)
committer Ronald S. Bultje <rbultje@google.com>
          Mon, 1 Jul 2013 18:36:07 +0000 (11:36 -0700)
Total encoding time for the first 50 frames of bus (speed 0) @ 1500kbps
goes from 2min34.8 to 2min14.4, i.e. a 10.4% overall speedup. The code is
x86-64 only for now; it needs some minor modifications to be 32-bit
compatible, because it uses 15 xmm registers, whereas 32-bit only has 8.

Change-Id: I2df53770c2e850813ffa713e1a91b45b0082b904
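
For reference, the per-coefficient arithmetic performed by the new
vp9_quantize_b_c() in vp9/encoder/vp9_quantize.c below, which the SSSE3
kernel applies to eight coefficients at a time, is roughly the following.
This is an illustrative sketch only (the standalone helper and its
signature are not part of the patch); names mirror the C code in the diff,
and [rc != 0] selects the DC entry (index 0) or the AC entry (index 1) of
each table.

#include <stdint.h>

static void quantize_one(int16_t coeff, int rc,
                         const int16_t *zbin, const int16_t *round,
                         const int16_t *quant, const int16_t *quant_shift,
                         const int16_t *dequant, int zbin_oq_value,
                         int16_t *qcoeff, int16_t *dqcoeff) {
  const int z = coeff;
  const int sz = z >> 31;              /* 0 for positive, -1 for negative */
  int x = (z ^ sz) - sz;               /* abs(z) */
  int y = 0;

  if (x >= zbin[rc != 0] + zbin_oq_value) {
    x += round[rc != 0];
    /* two 16-bit fixed-point multiplies instead of a variable shift */
    y = ((((x * quant[rc != 0]) >> 16) + x) * quant_shift[rc != 0]) >> 16;
  }
  x = (y ^ sz) - sz;                   /* reapply the sign */
  *qcoeff = (int16_t)x;
  *dqcoeff = (int16_t)(x * dequant[rc != 0]);   /* dequantized value */
}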

13 files changed:
vp9/common/vp9_entropy.c
vp9/common/vp9_entropy.h
vp9/common/vp9_onyxc_int.h
vp9/common/vp9_rtcd_defs.sh
vp9/decoder/vp9_detokenize.c
vp9/encoder/vp9_block.h
vp9/encoder/vp9_encodemb.c
vp9/encoder/vp9_onyx_int.h
vp9/encoder/vp9_quantize.c
vp9/encoder/vp9_rdopt.c
vp9/encoder/vp9_tokenize.c
vp9/encoder/x86/vp9_quantize_ssse3.asm [new file with mode: 0644]
vp9/vp9cx.mk

diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index d5f94ed..ca8b879 100644
@@ -50,28 +50,28 @@ DECLARE_ALIGNED(16, const uint8_t, vp9_pt_energy_class[MAX_ENTROPY_TOKENS]) = {
   0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5
 };
 
-DECLARE_ALIGNED(16, const int, vp9_default_scan_4x4[16]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_4x4[16]) = {
   0,  4,  1,  5,
   8,  2, 12,  9,
   3,  6, 13, 10,
   7, 14, 11, 15,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_col_scan_4x4[16]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_4x4[16]) = {
   0,  4,  8,  1,
   12,  5,  9,  2,
   13,  6, 10,  3,
   7, 14, 11, 15,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_row_scan_4x4[16]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_4x4[16]) = {
   0,  1,  4,  2,
   5,  3,  6,  8,
   9,  7, 12, 10,
   13, 11, 14, 15,
 };
 
-DECLARE_ALIGNED(64, const int, vp9_default_scan_8x8[64]) = {
+DECLARE_ALIGNED(64, const int16_t, vp9_default_scan_8x8[64]) = {
   0,  8,  1, 16,  9,  2, 17, 24,
   10,  3, 18, 25, 32, 11,  4, 26,
   33, 19, 40, 12, 34, 27,  5, 41,
@@ -82,7 +82,7 @@ DECLARE_ALIGNED(64, const int, vp9_default_scan_8x8[64]) = {
   46, 39, 61, 54, 47, 62, 55, 63,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_col_scan_8x8[64]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]) = {
   0,  8, 16,  1, 24,  9, 32, 17,
   2, 40, 25, 10, 33, 18, 48,  3,
   26, 41, 11, 56, 19, 34,  4, 49,
@@ -93,7 +93,7 @@ DECLARE_ALIGNED(16, const int, vp9_col_scan_8x8[64]) = {
   31, 61, 39, 54, 47, 62, 55, 63,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_row_scan_8x8[64]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]) = {
   0,  1,  2,  8,  9,  3, 16, 10,
   4, 17, 11, 24,  5, 18, 25, 12,
   19, 26, 32,  6, 13, 20, 33, 27,
@@ -104,7 +104,7 @@ DECLARE_ALIGNED(16, const int, vp9_row_scan_8x8[64]) = {
   60, 39, 61, 47, 54, 55, 62, 63,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_default_scan_16x16[256]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]) = {
   0,  16,   1,  32,  17,   2,  48,  33,  18,   3,  64,  34,  49,  19,  65,  80,
   50,   4,  35,  66,  20,  81,  96,  51,   5,  36,  82,  97,  67, 112,  21,  52,
   98,  37,  83, 113,   6,  68, 128,  53,  22,  99, 114,  84,   7, 129,  38,  69,
@@ -123,7 +123,7 @@ DECLARE_ALIGNED(16, const int, vp9_default_scan_16x16[256]) = {
   190, 221, 175, 236, 237, 191, 206, 252, 222, 253, 207, 238, 223, 254, 239, 255,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_col_scan_16x16[256]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]) = {
   0,  16,  32,  48,   1,  64,  17,  80,  33,  96,  49,   2,  65, 112,  18,  81,
   34, 128,  50,  97,   3,  66, 144,  19, 113,  35,  82, 160,  98,  51, 129,   4,
   67, 176,  20, 114, 145,  83,  36,  99, 130,  52, 192,   5, 161,  68, 115,  21,
@@ -142,7 +142,7 @@ DECLARE_ALIGNED(16, const int, vp9_col_scan_16x16[256]) = {
   159, 190, 221, 252, 175, 206, 237, 191, 253, 222, 238, 207, 254, 223, 239, 255,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_row_scan_16x16[256]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]) = {
   0,   1,   2,  16,   3,  17,   4,  18,  32,   5,  33,  19,   6,  34,  48,  20,
   49,   7,  35,  21,  50,  64,   8,  36,  65,  22,  51,  37,  80,   9,  66,  52,
   23,  38,  81,  67,  10,  53,  24,  82,  68,  96,  39,  11,  54,  83,  97,  69,
@@ -161,7 +161,7 @@ DECLARE_ALIGNED(16, const int, vp9_row_scan_16x16[256]) = {
   190, 251, 221, 191, 206, 236, 207, 237, 252, 222, 253, 223, 238, 239, 254, 255,
 };
 
-DECLARE_ALIGNED(16, const int, vp9_default_scan_32x32[1024]) = {
+DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]) = {
   0,   32,    1,   64,   33,    2,   96,   65,   34,  128,    3,   97,   66,  160,  129,   35,   98,    4,   67,  130,  161,  192,   36,   99,  224,    5,  162,  193,   68,  131,   37,  100,
   225,  194,  256,  163,   69,  132,    6,  226,  257,  288,  195,  101,  164,   38,  258,    7,  227,  289,  133,  320,   70,  196,  165,  290,  259,  228,   39,  321,  102,  352,    8,  197,
   71,  134,  322,  291,  260,  353,  384,  229,  166,  103,   40,  354,  323,  292,  135,  385,  198,  261,   72,    9,  416,  167,  386,  355,  230,  324,  104,  293,   41,  417,  199,  136,
@@ -460,28 +460,39 @@ void vp9_default_coef_probs(VP9_COMMON *pc) {
 // in {top, left, topleft, topright, bottomleft} order
 // for each position in raster scan order.
 // -1 indicates the neighbor does not exist.
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_default_scan_4x4_neighbors[16 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_col_scan_4x4_neighbors[16 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_row_scan_4x4_neighbors[16 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_col_scan_8x8_neighbors[64 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_row_scan_8x8_neighbors[64 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_default_scan_8x8_neighbors[64 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_col_scan_16x16_neighbors[256 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_row_scan_16x16_neighbors[256 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_default_scan_16x16_neighbors[256 * MAX_NEIGHBORS]);
-DECLARE_ALIGNED(16, int,
+DECLARE_ALIGNED(16, int16_t,
                 vp9_default_scan_32x32_neighbors[1024 * MAX_NEIGHBORS]);
 
-static int find_in_scan(const int *scan, int l, int idx) {
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_4x4[16]);
+DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_4x4[16]);
+DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_4x4[16]);
+DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_8x8[64]);
+DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_8x8[64]);
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_8x8[64]);
+DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_16x16[256]);
+DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_16x16[256]);
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_16x16[256]);
+DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_32x32[1024]);
+
+static int find_in_scan(const int16_t *scan, int l, int idx) {
   int n, l2 = l * l;
   for (n = 0; n < l2; n++) {
     int rc = scan[n];
@@ -491,13 +502,16 @@ static int find_in_scan(const int *scan, int l, int idx) {
   assert(0);
   return -1;
 }
-static void init_scan_neighbors(const int *scan, int l, int *neighbors,
+static void init_scan_neighbors(const int16_t *scan,
+                                int16_t *iscan,
+                                int l, int16_t *neighbors,
                                 int max_neighbors) {
   int l2 = l * l;
   int n, i, j;
 
   for (n = 0; n < l2; n++) {
     int rc = scan[n];
+    iscan[n] = find_in_scan(scan, l, n);
     assert(max_neighbors == MAX_NEIGHBORS);
     i = rc / l;
     j = rc % l;
@@ -541,29 +555,29 @@ static void init_scan_neighbors(const int *scan, int l, int *neighbors,
 }
 
 void vp9_init_neighbors() {
-  init_scan_neighbors(vp9_default_scan_4x4, 4,
+  init_scan_neighbors(vp9_default_scan_4x4, vp9_default_iscan_4x4, 4,
                       vp9_default_scan_4x4_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_row_scan_4x4, 4,
+  init_scan_neighbors(vp9_row_scan_4x4, vp9_row_iscan_4x4, 4,
                       vp9_row_scan_4x4_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_col_scan_4x4, 4,
+  init_scan_neighbors(vp9_col_scan_4x4, vp9_col_iscan_4x4, 4,
                       vp9_col_scan_4x4_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_default_scan_8x8, 8,
+  init_scan_neighbors(vp9_default_scan_8x8, vp9_default_iscan_8x8, 8,
                       vp9_default_scan_8x8_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_row_scan_8x8, 8,
+  init_scan_neighbors(vp9_row_scan_8x8, vp9_row_iscan_8x8, 8,
                       vp9_row_scan_8x8_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_col_scan_8x8, 8,
+  init_scan_neighbors(vp9_col_scan_8x8, vp9_col_iscan_8x8, 8,
                       vp9_col_scan_8x8_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_default_scan_16x16, 16,
+  init_scan_neighbors(vp9_default_scan_16x16, vp9_default_iscan_16x16, 16,
                       vp9_default_scan_16x16_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_row_scan_16x16, 16,
+  init_scan_neighbors(vp9_row_scan_16x16, vp9_row_iscan_16x16, 16,
                       vp9_row_scan_16x16_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_col_scan_16x16, 16,
+  init_scan_neighbors(vp9_col_scan_16x16, vp9_col_iscan_16x16, 16,
                       vp9_col_scan_16x16_neighbors, MAX_NEIGHBORS);
-  init_scan_neighbors(vp9_default_scan_32x32, 32,
+  init_scan_neighbors(vp9_default_scan_32x32, vp9_default_iscan_32x32, 32,
                       vp9_default_scan_32x32_neighbors, MAX_NEIGHBORS);
 }
 
-const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad) {
+const int16_t *vp9_get_coef_neighbors_handle(const int16_t *scan, int *pad) {
   if (scan == vp9_default_scan_4x4) {
     *pad = MAX_NEIGHBORS;
     return vp9_default_scan_4x4_neighbors;
diff --git a/vp9/common/vp9_entropy.h b/vp9/common/vp9_entropy.h
index 665b5d8..5937efa 100644
@@ -99,22 +99,39 @@ typedef vp9_prob vp9_coeff_probs[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
 
 struct VP9Common;
 void vp9_default_coef_probs(struct VP9Common *);
-extern DECLARE_ALIGNED(16, const int, vp9_default_scan_4x4[16]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_4x4[16]);
 
-extern DECLARE_ALIGNED(16, const int, vp9_col_scan_4x4[16]);
-extern DECLARE_ALIGNED(16, const int, vp9_row_scan_4x4[16]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_4x4[16]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_4x4[16]);
 
-extern DECLARE_ALIGNED(64, const int, vp9_default_scan_8x8[64]);
+extern DECLARE_ALIGNED(64, const int16_t, vp9_default_scan_8x8[64]);
 
-extern DECLARE_ALIGNED(16, const int, vp9_col_scan_8x8[64]);
-extern DECLARE_ALIGNED(16, const int, vp9_row_scan_8x8[64]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_8x8[64]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_8x8[64]);
 
-extern DECLARE_ALIGNED(16, const int, vp9_default_scan_16x16[256]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_16x16[256]);
 
-extern DECLARE_ALIGNED(16, const int, vp9_col_scan_16x16[256]);
-extern DECLARE_ALIGNED(16, const int, vp9_row_scan_16x16[256]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_col_scan_16x16[256]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_row_scan_16x16[256]);
 
-extern DECLARE_ALIGNED(16, const int, vp9_default_scan_32x32[1024]);
+extern DECLARE_ALIGNED(16, const int16_t, vp9_default_scan_32x32[1024]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_4x4[16]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_4x4[16]);
+extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_4x4[16]);
+
+extern DECLARE_ALIGNED(64, int16_t, vp9_default_iscan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_8x8[64]);
+extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_8x8[64]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_col_iscan_16x16[256]);
+extern DECLARE_ALIGNED(16, int16_t, vp9_row_iscan_16x16[256]);
+
+extern DECLARE_ALIGNED(16, int16_t, vp9_default_iscan_32x32[1024]);
 
 void vp9_coef_tree_initialize(void);
 void vp9_adapt_coef_probs(struct VP9Common *);
@@ -149,7 +166,8 @@ static int get_coef_band(const uint8_t * band_translate, int coef_index) {
 }
 
 #define MAX_NEIGHBORS 2
-static INLINE int get_coef_context(const int *scan, const int *neighbors,
+static INLINE int get_coef_context(const int16_t *scan,
+                                   const int16_t *neighbors,
                                    int nb_pad, uint8_t *token_cache,
                                    int c, int l) {
   int eob = l;
@@ -169,7 +187,7 @@ static INLINE int get_coef_context(const int *scan, const int *neighbors,
   }
 }
 
-const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad);
+const int16_t *vp9_get_coef_neighbors_handle(const int16_t *scan, int *pad);
 
 
 // 128 lists of probabilities are stored for the following ONE node probs:
@@ -198,7 +216,7 @@ void vp9_model_to_full_probs(const vp9_prob *model, vp9_prob *full);
 
 extern const vp9_prob vp9_modelcoefprobs[COEFPROB_MODELS][ENTROPY_NODES - 1];
 
-static INLINE const int* get_scan_4x4(TX_TYPE tx_type) {
+static INLINE const int16_t* get_scan_4x4(TX_TYPE tx_type) {
   switch (tx_type) {
     case ADST_DCT:
       return vp9_row_scan_4x4;
@@ -209,7 +227,18 @@ static INLINE const int* get_scan_4x4(TX_TYPE tx_type) {
   }
 }
 
-static INLINE const int* get_scan_8x8(TX_TYPE tx_type) {
+static INLINE const int16_t* get_iscan_4x4(TX_TYPE tx_type) {
+  switch (tx_type) {
+    case ADST_DCT:
+      return vp9_row_iscan_4x4;
+    case DCT_ADST:
+      return vp9_col_iscan_4x4;
+    default:
+      return vp9_default_iscan_4x4;
+  }
+}
+
+static INLINE const int16_t* get_scan_8x8(TX_TYPE tx_type) {
   switch (tx_type) {
     case ADST_DCT:
       return vp9_row_scan_8x8;
@@ -220,7 +249,18 @@ static INLINE const int* get_scan_8x8(TX_TYPE tx_type) {
   }
 }
 
-static INLINE const int* get_scan_16x16(TX_TYPE tx_type) {
+static INLINE const int16_t* get_iscan_8x8(TX_TYPE tx_type) {
+  switch (tx_type) {
+    case ADST_DCT:
+      return vp9_row_iscan_8x8;
+    case DCT_ADST:
+      return vp9_col_iscan_8x8;
+    default:
+      return vp9_default_iscan_8x8;
+  }
+}
+
+static INLINE const int16_t* get_scan_16x16(TX_TYPE tx_type) {
   switch (tx_type) {
     case ADST_DCT:
       return vp9_row_scan_16x16;
@@ -231,6 +271,17 @@ static INLINE const int* get_scan_16x16(TX_TYPE tx_type) {
   }
 }
 
+static INLINE const int16_t* get_iscan_16x16(TX_TYPE tx_type) {
+  switch (tx_type) {
+    case ADST_DCT:
+      return vp9_row_iscan_16x16;
+    case DCT_ADST:
+      return vp9_col_iscan_16x16;
+    default:
+      return vp9_default_iscan_16x16;
+  }
+}
+
 enum { VP9_COEF_UPDATE_PROB = 252 };
 
 #endif  // VP9_COMMON_VP9_ENTROPY_H_
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index ad39300..abbf73f 100644
@@ -129,10 +129,10 @@ typedef enum {
 typedef struct VP9Common {
   struct vpx_internal_error_info  error;
 
-  DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][2]);
-  DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][2]);
+  DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
 #if CONFIG_ALPHA
-  DECLARE_ALIGNED(16, int16_t, a_dequant[QINDEX_RANGE][2]);
+  DECLARE_ALIGNED(16, int16_t, a_dequant[QINDEX_RANGE][8]);
 #endif
 
   int width;
diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh
index 74ee545..e7cefa5 100644
@@ -564,6 +564,11 @@ specialize vp9_block_error sse2
 prototype void vp9_subtract_block "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride"
 specialize vp9_subtract_block sse2
 
+[ $arch = "x86_64" ] && ssse3_x86_64=ssse3
+
+prototype void vp9_quantize_b "int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block, int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr, int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"
+specialize vp9_quantize_b $ssse3_x86_64
+
 #
 # Structured Similarity (SSIM)
 #
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index bc1c58a..81403a4 100644
@@ -113,7 +113,7 @@ static int decode_coefs(FRAME_CONTEXT *fc, const MACROBLOCKD *xd,
   vp9_prob *prob;
   vp9_coeff_count_model *coef_counts;
   const int ref = xd->mode_info_context->mbmi.ref_frame[0] != INTRA_FRAME;
-  const int *scan, *nb;
+  const int16_t *scan, *nb;
   uint8_t token_cache[1024];
   const uint8_t * band_translate;
 #if CONFIG_BALANCED_COEFTREE
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 1e7aad7..43e26be 100644
@@ -66,7 +66,7 @@ struct macroblock_plane {
 
   // Quantizer setings
   int16_t *quant;
-  uint8_t *quant_shift;
+  int16_t *quant_shift;
   int16_t *zbin;
   int16_t *round;
 
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index e696248..253f1ae 100644
@@ -109,8 +109,8 @@ static const int plane_rd_mult[4] = {
 
 // This function is a place holder for now but may ultimately need
 // to scan previous tokens to work out the correct context.
-static int trellis_get_coeff_context(const int *scan,
-                                     const int *nb,
+static int trellis_get_coeff_context(const int16_t *scan,
+                                     const int16_t *nb,
                                      int idx, int token,
                                      uint8_t *token_cache,
                                      int pad, int l) {
@@ -142,7 +142,7 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
   PLANE_TYPE type = xd->plane[plane].plane_type;
   int err_mult = plane_rd_mult[type];
   int default_eob, pad;
-  int const *scan, *nb;
+  const int16_t *scan, *nb;
   const int mul = 1 + (tx_size == TX_32X32);
   uint8_t token_cache[1024];
   const int ib = txfrm_block_to_raster_block(xd, bsize, plane,
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 980095b..ba11a93 100644
@@ -253,21 +253,21 @@ enum BlockSize {
 
 typedef struct VP9_COMP {
 
-  DECLARE_ALIGNED(16, short, y_quant[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, unsigned char, y_quant_shift[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, short, y_zbin[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, short, y_round[QINDEX_RANGE][16]);
+  DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]);
 
-  DECLARE_ALIGNED(16, short, uv_quant[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, unsigned char, uv_quant_shift[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, short, uv_zbin[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, short, uv_round[QINDEX_RANGE][16]);
+  DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
 
 #if CONFIG_ALPHA
-  DECLARE_ALIGNED(16, short, a_quant[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, unsigned char, a_quant_shift[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, short, a_zbin[QINDEX_RANGE][16]);
-  DECLARE_ALIGNED(16, short, a_round[QINDEX_RANGE][16]);
+  DECLARE_ALIGNED(16, int16_t, a_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, a_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, a_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, a_round[QINDEX_RANGE][8]);
 #endif
 
   MACROBLOCK mb;
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index e68a48b..6f2e13a 100644
 extern int enc_debug;
 #endif
 
-static void quantize(int16_t *coeff_ptr, int n_coeffs, int skip_block,
-                     int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr,
-                     uint8_t *quant_shift_ptr,
-                     int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
-                     int16_t *dequant_ptr, int zbin_oq_value,
-                     uint16_t *eob_ptr, const int *scan) {
+void vp9_quantize_b_c(int16_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+                      int16_t *zbin_ptr, int16_t *round_ptr, int16_t *quant_ptr,
+                      int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                      int16_t *dqcoeff_ptr, int16_t *dequant_ptr,
+                      int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan,
+                      const int16_t *iscan) {
   int i, rc, eob;
   int zbins[2], nzbins[2], zbin;
   int x, y, z, sz;
@@ -69,8 +69,8 @@ static void quantize(int16_t *coeff_ptr, int n_coeffs, int skip_block,
 
       if (x >= zbin) {
         x += (round_ptr[rc != 0]);
-        y  = ((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x))
-            >> quant_shift_ptr[rc != 0];            // quantize (x)
+        y  = (((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x)) *
+              quant_shift_ptr[rc != 0]) >> 16;      // quantize (x)
         x  = (y ^ sz) - sz;                         // get the sign back
         qcoeff_ptr[rc]  = x;                        // write to destination
         dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0];  // dequantized value
@@ -85,12 +85,13 @@ static void quantize(int16_t *coeff_ptr, int n_coeffs, int skip_block,
 }
 
 // This function works well for large transform size.
-static void quantize_sparse(int16_t *coeff_ptr, int n_coeffs, int skip_block,
+static void quantize_sparse(int16_t *coeff_ptr, intptr_t n_coeffs,
+                            int skip_block,
                             int16_t *zbin_ptr, int16_t *round_ptr,
-                            int16_t *quant_ptr, uint8_t *quant_shift_ptr,
+                            int16_t *quant_ptr, int16_t *quant_shift_ptr,
                             int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
                             int16_t *dequant_ptr, int zbin_oq_value,
-                            uint16_t *eob_ptr, const int *scan,
+                            uint16_t *eob_ptr, const int16_t *scan,
                             int *idx_arr) {
   int i, rc, eob;
   int zbins[2], nzbins[2], zbin;
@@ -134,8 +135,8 @@ static void quantize_sparse(int16_t *coeff_ptr, int n_coeffs, int skip_block,
 
       if (x >= zbin) {
         x += (round_ptr[rc != 0]);
-        y  = ((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x))
-            >> quant_shift_ptr[rc != 0];            // quantize (x)
+        y  = (((int)(((int)(x * quant_ptr[rc != 0]) >> 16) + x)) *
+              quant_shift_ptr[rc != 0]) >> 16;      // quantize (x)
 
         x  = (y ^ sz) - sz;                         // get the sign back
         qcoeff_ptr[rc]  = x;                        // write to destination
@@ -153,21 +154,25 @@ static void quantize_sparse(int16_t *coeff_ptr, int n_coeffs, int skip_block,
 void vp9_quantize(MACROBLOCK *mb, int plane, int block, int n_coeffs,
                   TX_TYPE tx_type) {
   MACROBLOCKD *const xd = &mb->e_mbd;
-  const int *scan;
+  const int16_t *scan, *iscan;
 
   // These contexts may be available in the caller
   switch (n_coeffs) {
     case 4 * 4:
       scan = get_scan_4x4(tx_type);
+      iscan = get_iscan_4x4(tx_type);
       break;
     case 8 * 8:
       scan = get_scan_8x8(tx_type);
+      iscan = get_iscan_8x8(tx_type);
       break;
     case 16 * 16:
       scan = get_scan_16x16(tx_type);
+      iscan = get_iscan_16x16(tx_type);
       break;
     default:
       scan = vp9_default_scan_32x32;
+      iscan = vp9_default_iscan_32x32;
       break;
   }
 
@@ -190,18 +195,18 @@ void vp9_quantize(MACROBLOCK *mb, int plane, int block, int n_coeffs,
                     scan, idx_arr);
   }
   else {
-    quantize(BLOCK_OFFSET(mb->plane[plane].coeff, block, 16),
-             n_coeffs, mb->skip_block,
-             mb->plane[plane].zbin,
-             mb->plane[plane].round,
-             mb->plane[plane].quant,
-             mb->plane[plane].quant_shift,
-             BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16),
-             BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
-             xd->plane[plane].dequant,
-             mb->plane[plane].zbin_extra,
-             &xd->plane[plane].eobs[block],
-             scan);
+    vp9_quantize_b(BLOCK_OFFSET(mb->plane[plane].coeff, block, 16),
+                   n_coeffs, mb->skip_block,
+                   mb->plane[plane].zbin,
+                   mb->plane[plane].round,
+                   mb->plane[plane].quant,
+                   mb->plane[plane].quant_shift,
+                   BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16),
+                   BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
+                   xd->plane[plane].dequant,
+                   mb->plane[plane].zbin_extra,
+                   &xd->plane[plane].eobs[block],
+                   scan, iscan);
   }
 }
 
@@ -209,9 +214,10 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
                                 int y_blocks) {
   MACROBLOCKD *const xd = &mb->e_mbd;
   const struct plane_block_idx pb_idx = plane_block_idx(y_blocks, b_idx);
-  const int *pt_scan = get_scan_4x4(tx_type);
+  const int16_t *scan = get_scan_4x4(tx_type);
+  const int16_t *iscan = get_iscan_4x4(tx_type);
 
-  quantize(BLOCK_OFFSET(mb->plane[pb_idx.plane].coeff, pb_idx.block, 16),
+  vp9_quantize_b(BLOCK_OFFSET(mb->plane[pb_idx.plane].coeff, pb_idx.block, 16),
            16, mb->skip_block,
            mb->plane[pb_idx.plane].zbin,
            mb->plane[pb_idx.plane].round,
@@ -222,10 +228,10 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *mb, int b_idx, TX_TYPE tx_type,
            xd->plane[pb_idx.plane].dequant,
            mb->plane[pb_idx.plane].zbin_extra,
            &xd->plane[pb_idx.plane].eobs[pb_idx.block],
-           pt_scan);
+           scan, iscan);
 }
 
-static void invert_quant(int16_t *quant, uint8_t *shift, int d) {
+static void invert_quant(int16_t *quant, int16_t *shift, int d) {
   unsigned t;
   int l;
   t = d;
@@ -233,7 +239,7 @@ static void invert_quant(int16_t *quant, uint8_t *shift, int d) {
     t >>= 1;
   t = 1 + (1 << (16 + l)) / d;
   *quant = (int16_t)(t - (1 << 16));
-  *shift = l;
+  *shift = 1 << (16 - l);
 }
 
 void vp9_init_quantizer(VP9_COMP *cpi) {
@@ -252,6 +258,7 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
       qzbin_factor = 64;
       qrounding_factor = 64;
     }
+
     // dc values
     quant_val = vp9_dc_quant(q, cpi->common.y_dc_delta_q);
     invert_quant(cpi->y_quant[q] + 0, cpi->y_quant_shift[q] + 0, quant_val);
@@ -274,32 +281,46 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
 #endif
 
     quant_val = vp9_ac_quant(q, 0);
+    invert_quant(cpi->y_quant[q] + 1, cpi->y_quant_shift[q] + 1, quant_val);
+    cpi->y_zbin[q][1] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
+    cpi->y_round[q][1] = (qrounding_factor * quant_val) >> 7;
     cpi->common.y_dequant[q][1] = quant_val;
+
     quant_uv_val = vp9_ac_quant(q, cpi->common.uv_ac_delta_q);
+    invert_quant(cpi->uv_quant[q] + 1, cpi->uv_quant_shift[q] + 1,
+                 quant_uv_val);
+    cpi->uv_zbin[q][1] = ROUND_POWER_OF_TWO(qzbin_factor * quant_uv_val, 7);
+    cpi->uv_round[q][1] = (qrounding_factor * quant_uv_val) >> 7;
     cpi->common.uv_dequant[q][1] = quant_uv_val;
+
 #if CONFIG_ALPHA
     quant_alpha_val = vp9_ac_quant(q, cpi->common.a_ac_delta_q);
+    invert_quant(cpi->a_quant[q] + 1, cpi->a_quant_shift[q] + 1,
+                 quant_alpha_val);
+    cpi->a_zbin[q][1] = ROUND_POWER_OF_TWO(qzbin_factor * quant_alpha_val, 7);
+    cpi->a_round[q][1] = (qrounding_factor * quant_alpha_val) >> 7;
     cpi->common.a_dequant[q][1] = quant_alpha_val;
 #endif
-    // all the 4x4 ac values =;
-    for (i = 1; i < 16; i++) {
-      int rc = vp9_default_scan_4x4[i];
 
-      invert_quant(cpi->y_quant[q] + rc, cpi->y_quant_shift[q] + rc, quant_val);
-      cpi->y_zbin[q][rc] = ROUND_POWER_OF_TWO(qzbin_factor * quant_val, 7);
-      cpi->y_round[q][rc] = (qrounding_factor * quant_val) >> 7;
+    for (i = 2; i < 8; i++) {
+      cpi->y_quant[q][i] = cpi->y_quant[q][1];
+      cpi->y_quant_shift[q][i] = cpi->y_quant_shift[q][1];
+      cpi->y_zbin[q][i] = cpi->y_zbin[q][1];
+      cpi->y_round[q][i] = cpi->y_round[q][1];
+      cpi->common.y_dequant[q][i] = cpi->common.y_dequant[q][1];
 
-      invert_quant(cpi->uv_quant[q] + rc, cpi->uv_quant_shift[q] + rc,
-        quant_uv_val);
-      cpi->uv_zbin[q][rc] = ROUND_POWER_OF_TWO(qzbin_factor * quant_uv_val, 7);
-      cpi->uv_round[q][rc] = (qrounding_factor * quant_uv_val) >> 7;
+      cpi->uv_quant[q][i] = cpi->uv_quant[q][1];
+      cpi->uv_quant_shift[q][i] = cpi->uv_quant_shift[q][1];
+      cpi->uv_zbin[q][i] = cpi->uv_zbin[q][1];
+      cpi->uv_round[q][i] = cpi->uv_round[q][1];
+      cpi->common.uv_dequant[q][i] = cpi->common.uv_dequant[q][1];
 
 #if CONFIG_ALPHA
-      invert_quant(cpi->a_quant[q] + rc, cpi->a_quant_shift[q] + rc,
-          quant_alpha_val);
-      cpi->a_zbin[q][rc] =
-          ROUND_POWER_OF_TWO(qzbin_factor * quant_alpha_val, 7);
-      cpi->a_round[q][rc] = (qrounding_factor * quant_alpha_val) >> 7;
+      cpi->a_quant[q][i] = cpi->a_quant[q][1];
+      cpi->a_quant_shift[q][i] = cpi->a_quant_shift[q][1];
+      cpi->a_zbin[q][i] = cpi->a_zbin[q][1];
+      cpi->a_round[q][i] = cpi->a_round[q][1];
+      cpi->common.a_dequant[q][i] = cpi->common.a_dequant[q][1];
 #endif
     }
   }
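
Why invert_quant() above now stores 1 << (16 - l) rather than the raw shift
amount l (an illustrative check, not part of the patch): it turns the
quantizer's final data-dependent right shift into a multiply whose high 16
bits are kept, which is exactly what pmulhw computes in the SSSE3 kernel.

#include <assert.h>

/* For non-negative 16-bit t and 2 <= l <= 14,
 *   (t * (1 << (16 - l))) >> 16  ==  t >> l
 * so quant_shift[] can hold the multiplier 1 << (16 - l) and both the C
 * and the SIMD code finish with a fixed ">> 16" instead of ">> l". */
int main(void) {
  int l, t;
  for (l = 2; l <= 14; l++)
    for (t = 0; t <= 32767; t += 251)
      assert(((t * (1 << (16 - l))) >> 16) == (t >> l));
  return 0;
}
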
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 5b4b752..0cf4130 100644
@@ -305,7 +305,7 @@ static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
   int pt;
   int c = 0;
   int cost = 0, pad;
-  const int *scan, *nb;
+  const int16_t *scan, *nb;
   const int eob = xd->plane[plane].eobs[block];
   const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
   const int ref = mbmi->ref_frame[0] != INTRA_FRAME;
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index abf05c2..91c2a14 100644
@@ -90,8 +90,6 @@ static void fill_value_tokens() {
   vp9_dct_value_cost_ptr   = dct_value_cost + DCT_MAX_VALUE;
 }
 
-extern const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad);
-
 struct tokenize_b_args {
   VP9_COMP *cpi;
   MACROBLOCKD *xd;
@@ -127,7 +125,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
   ENTROPY_CONTEXT *L = xd->plane[plane].left_context + loff;
   int seg_eob, default_eob, pad;
   const int segment_id = mbmi->segment_id;
-  const int *scan, *nb;
+  const int16_t *scan, *nb;
   vp9_coeff_count *counts;
   vp9_coeff_probs_model *coef_probs;
   const int ref = mbmi->ref_frame[0] != INTRA_FRAME;
diff --git a/vp9/encoder/x86/vp9_quantize_ssse3.asm b/vp9/encoder/x86/vp9_quantize_ssse3.asm
new file mode 100644 (file)
index 0000000..665bafa
--- /dev/null
@@ -0,0 +1,165 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+pw_1: times 8 dw 1
+
+SECTION .text
+
+INIT_XMM ssse3
+cglobal quantize_b, 0, 6, 15, coeff, ncoeff, skip, zbin, round, quant, \
+                              shift, qcoeff, dqcoeff, dequant, zbin_oq, \
+                              eob, scan, iscan
+  cmp                    dword skipm, 0
+  jne .blank
+
+  ; actual quantize loop - setup pointers, rounders, etc.
+  movifnidn                   coeffq, coeffmp
+  movifnidn                  ncoeffq, ncoeffmp
+  mov                             r2, dequantmp
+  movifnidn                    zbinq, zbinmp
+  movifnidn                   roundq, roundmp
+  movifnidn                   quantq, quantmp
+  movd                            m4, dword zbin_oqm       ; m4 = zbin_oq
+  mova                            m0, [zbinq]              ; m0 = zbin
+  punpcklwd                       m4, m4
+  mova                            m1, [roundq]             ; m1 = round
+  pshufd                          m4, m4, 0
+  mova                            m2, [quantq]             ; m2 = quant
+  paddw                           m0, m4                   ; m0 = zbin + zbin_oq
+  mova                            m3, [r2q]                ; m3 = dequant
+  psubw                           m0, [pw_1]
+  mov                             r2, shiftmp
+  mov                             r3, qcoeffmp
+  mova                            m4, [r2]                 ; m4 = shift
+  mov                             r4, dqcoeffmp
+  mov                             r5, iscanmp
+  mov                             r2, eobmp
+  pxor                            m5, m5                   ; m5 = dedicated zero
+  DEFINE_ARGS coeff, ncoeff, eob, qcoeff, dqcoeff, iscan
+  lea                         coeffq, [  coeffq+ncoeffq*2]
+  lea                         iscanq, [  iscanq+ncoeffq*2]
+  lea                        qcoeffq, [ qcoeffq+ncoeffq*2]
+  lea                       dqcoeffq, [dqcoeffq+ncoeffq*2]
+  neg                        ncoeffq
+
+  ; get DC and first 15 AC coeffs
+  mova                            m9, [  coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+  mova                           m10, [  coeffq+ncoeffq*2+16] ; m10 = c[i]
+  pabsw                           m6, m9                   ; m6 = abs(m9)
+  pabsw                          m11, m10                  ; m11 = abs(m10)
+  pcmpgtw                         m7, m6, m0               ; m7 = c[i] >= zbin
+  punpckhqdq                      m0, m0
+  pcmpgtw                        m12, m11, m0              ; m12 = c[i] >= zbin
+  paddw                           m6, m1                   ; m6 += round
+  punpckhqdq                      m1, m1
+  paddw                          m11, m1                   ; m11 += round
+  pmulhw                          m8, m6, m2               ; m8 = m6*q>>16
+  punpckhqdq                      m2, m2
+  pmulhw                         m13, m11, m2              ; m13 = m11*q>>16
+  paddw                           m8, m6                   ; m8 += m6
+  paddw                          m13, m11                  ; m13 += m11
+  pmulhw                          m8, m4                   ; m8 = m8*qsh>>16
+  punpckhqdq                      m4, m4
+  pmulhw                         m13, m4                   ; m13 = m13*qsh>>16
+  psignw                          m8, m9                   ; m8 = reinsert sign
+  psignw                         m13, m10                  ; m13 = reinsert sign
+  pand                            m8, m7
+  pand                           m13, m12
+  mova        [qcoeffq+ncoeffq*2+ 0], m8
+  mova        [qcoeffq+ncoeffq*2+16], m13
+  pmullw                          m8, m3                   ; dqc[i] = qc[i] * q
+  punpckhqdq                      m3, m3
+  pmullw                         m13, m3                   ; dqc[i] = qc[i] * q
+  mova       [dqcoeffq+ncoeffq*2+ 0], m8
+  mova       [dqcoeffq+ncoeffq*2+16], m13
+  pcmpeqw                         m8, m5                   ; m8 = c[i] == 0
+  pcmpeqw                        m13, m5                   ; m13 = c[i] == 0
+  mova                            m6, [  iscanq+ncoeffq*2+ 0] ; m6 = scan[i]
+  mova                           m11, [  iscanq+ncoeffq*2+16] ; m11 = scan[i]
+  psubw                           m6, m7                   ; m6 = scan[i] + 1
+  psubw                          m11, m12                  ; m11 = scan[i] + 1
+  pandn                           m8, m6                   ; m8 = max(eob)
+  pandn                          m13, m11                  ; m13 = max(eob)
+  pmaxsw                          m8, m13
+  add                        ncoeffq, mmsize
+  jz .accumulate_eob
+
+.ac_only_loop:
+  mova                            m9, [  coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+  mova                           m10, [  coeffq+ncoeffq*2+16] ; m10 = c[i]
+  pabsw                           m6, m9                   ; m6 = abs(m9)
+  pabsw                          m11, m10                  ; m11 = abs(m10)
+  pcmpgtw                         m7, m6, m0               ; m7 = c[i] >= zbin
+  pcmpgtw                        m12, m11, m0              ; m12 = c[i] >= zbin
+  paddw                           m6, m1                   ; m6 += round
+  paddw                          m11, m1                   ; m11 += round
+  pmulhw                         m14, m6, m2               ; m14 = m6*q>>16
+  pmulhw                         m13, m11, m2              ; m13 = m11*q>>16
+  paddw                          m14, m6                   ; m14 += m6
+  paddw                          m13, m11                  ; m13 += m11
+  pmulhw                         m14, m4                   ; m14 = m14*qsh>>16
+  pmulhw                         m13, m4                   ; m13 = m13*qsh>>16
+  psignw                         m14, m9                   ; m14 = reinsert sign
+  psignw                         m13, m10                  ; m13 = reinsert sign
+  pand                           m14, m7
+  pand                           m13, m12
+  mova        [qcoeffq+ncoeffq*2+ 0], m14
+  mova        [qcoeffq+ncoeffq*2+16], m13
+  pmullw                         m14, m3                   ; dqc[i] = qc[i] * q
+  pmullw                         m13, m3                   ; dqc[i] = qc[i] * q
+  mova       [dqcoeffq+ncoeffq*2+ 0], m14
+  mova       [dqcoeffq+ncoeffq*2+16], m13
+  pcmpeqw                        m14, m5                   ; m14 = c[i] == 0
+  pcmpeqw                        m13, m5                   ; m13 = c[i] == 0
+  mova                            m6, [  iscanq+ncoeffq*2+ 0] ; m6 = scan[i]
+  mova                           m11, [  iscanq+ncoeffq*2+16] ; m11 = scan[i]
+  psubw                           m6, m7                   ; m6 = scan[i] + 1
+  psubw                          m11, m12                  ; m11 = scan[i] + 1
+  pandn                          m14, m6                   ; m14 = max(eob)
+  pandn                          m13, m11                  ; m13 = max(eob)
+  pmaxsw                          m8, m14
+  pmaxsw                          m8, m13
+  add                        ncoeffq, mmsize
+  jl .ac_only_loop
+
+.accumulate_eob:
+  ; horizontally accumulate/max eobs and write into [eob] memory pointer
+  pshufd                          m7, m8, 0xe
+  pmaxsw                          m8, m7
+  pshuflw                         m7, m8, 0xe
+  pmaxsw                          m8, m7
+  pshuflw                         m7, m8, 0x1
+  pmaxsw                          m8, m7
+  pextrw                      [eobq], m8, 0
+  RET
+
+  ; skip-block, i.e. just write all zeroes
+.blank:
+  mov                             r0, dqcoeffmp
+  movifnidn                  ncoeffq, ncoeffmp
+  mov                             r2, qcoeffmp
+  mov                             r3, eobmp
+  DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
+  lea                       dqcoeffq, [dqcoeffq+ncoeffq*2]
+  lea                        qcoeffq, [ qcoeffq+ncoeffq*2]
+  neg                        ncoeffq
+  pxor                            m7, m7
+.blank_loop:
+  mova       [dqcoeffq+ncoeffq*2+ 0], m7
+  mova       [dqcoeffq+ncoeffq*2+16], m7
+  mova        [qcoeffq+ncoeffq*2+ 0], m7
+  mova        [qcoeffq+ncoeffq*2+16], m7
+  add                        ncoeffq, mmsize
+  jl .blank_loop
+  mov                    word [eobq], 0
+  RET
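
A note on the end-of-block handling in the kernel above: each lane loads
iscan[i] and, where the coefficient passed the zero-bin, adds 1 by
subtracting the all-ones compare mask; lanes whose quantized coefficient is
zero are then cleared (pandn), a running maximum is kept (pmaxsw), and the
vector is finally reduced horizontally into *eob_ptr. This is why the patch
introduces the vp9_*_iscan_* tables and passes iscan to vp9_quantize_b. A
scalar sketch of the same computation (illustrative only, not code from the
patch):

#include <stdint.h>

static uint16_t eob_from_iscan(const int16_t *qcoeff, const int16_t *iscan,
                               intptr_t n_coeffs) {
  /* iscan[] maps a raster position to its position in scan order. */
  int eob = 0;
  intptr_t i;
  for (i = 0; i < n_coeffs; i++) {
    if (qcoeff[i] != 0 && iscan[i] + 1 > eob)
      eob = iscan[i] + 1;
  }
  return (uint16_t)eob;   /* 1 + last nonzero position in scan order, or 0 */
}
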
diff --git a/vp9/vp9cx.mk b/vp9/vp9cx.mk
index a1e9375..e5901f2 100644
@@ -89,6 +89,9 @@ VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_error_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_subpel_variance_impl_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_temporal_filter_apply_sse2.asm
 VP9_CX_SRCS-$(HAVE_SSE3) += encoder/x86/vp9_sad_sse3.asm
+ifeq ($(ARCH_X86_64),yes)
+VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3.asm
+endif
 VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_sad_ssse3.asm
 VP9_CX_SRCS-$(HAVE_SSE4_1) += encoder/x86/vp9_sad_sse4.asm
 VP9_CX_SRCS-$(ARCH_X86_64) += encoder/x86/vp9_ssim_opt.asm