path: root/media/libjpeg/simd/mips64
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /media/libjpeg/simd/mips64
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'media/libjpeg/simd/mips64')
-rw-r--r--  media/libjpeg/simd/mips64/jccolext-mmi.c      |  455
-rw-r--r--  media/libjpeg/simd/mips64/jccolor-mmi.c       |  148
-rw-r--r--  media/libjpeg/simd/mips64/jcgray-mmi.c        |  132
-rw-r--r--  media/libjpeg/simd/mips64/jcgryext-mmi.c      |  374
-rw-r--r--  media/libjpeg/simd/mips64/jcsample-mmi.c      |   98
-rw-r--r--  media/libjpeg/simd/mips64/jcsample.h          |   28
-rw-r--r--  media/libjpeg/simd/mips64/jdcolext-mmi.c      |  415
-rw-r--r--  media/libjpeg/simd/mips64/jdcolor-mmi.c       |  139
-rw-r--r--  media/libjpeg/simd/mips64/jdmerge-mmi.c       |  149
-rw-r--r--  media/libjpeg/simd/mips64/jdmrgext-mmi.c      |  615
-rw-r--r--  media/libjpeg/simd/mips64/jdsample-mmi.c      |  304
-rw-r--r--  media/libjpeg/simd/mips64/jfdctfst-mmi.c      |  255
-rw-r--r--  media/libjpeg/simd/mips64/jfdctint-mmi.c      |  398
-rw-r--r--  media/libjpeg/simd/mips64/jidctfst-mmi.c      |  395
-rw-r--r--  media/libjpeg/simd/mips64/jidctint-mmi.c      |  571
-rw-r--r--  media/libjpeg/simd/mips64/jquanti-mmi.c       |  124
-rw-r--r--  media/libjpeg/simd/mips64/jsimd.c             |  866
-rw-r--r--  media/libjpeg/simd/mips64/jsimd_mmi.h         |   69
-rw-r--r--  media/libjpeg/simd/mips64/loongson-mmintrin.h | 1334
19 files changed, 6869 insertions, 0 deletions
diff --git a/media/libjpeg/simd/mips64/jccolext-mmi.c b/media/libjpeg/simd/mips64/jccolext-mmi.c
new file mode 100644
index 0000000000..558eb2ab10
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jccolext-mmi.c
@@ -0,0 +1,455 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+ * Copyright (C) 2014-2015, 2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * ZhangLixia <zhanglixia-hf@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* This file is included by jccolor-mmi.c */
+
+
+#if RGB_RED == 0
+#define mmA re
+#define mmB ro
+#elif RGB_GREEN == 0
+#define mmA ge
+#define mmB go
+#elif RGB_BLUE == 0
+#define mmA be
+#define mmB bo
+#else
+#define mmA xe
+#define mmB xo
+#endif
+
+#if RGB_RED == 1
+#define mmC re
+#define mmD ro
+#elif RGB_GREEN == 1
+#define mmC ge
+#define mmD go
+#elif RGB_BLUE == 1
+#define mmC be
+#define mmD bo
+#else
+#define mmC xe
+#define mmD xo
+#endif
+
+#if RGB_RED == 2
+#define mmE re
+#define mmF ro
+#elif RGB_GREEN == 2
+#define mmE ge
+#define mmF go
+#elif RGB_BLUE == 2
+#define mmE be
+#define mmF bo
+#else
+#define mmE xe
+#define mmF xo
+#endif
+
+#if RGB_RED == 3
+#define mmG re
+#define mmH ro
+#elif RGB_GREEN == 3
+#define mmG ge
+#define mmH go
+#elif RGB_BLUE == 3
+#define mmG be
+#define mmH bo
+#else
+#define mmG xe
+#define mmH xo
+#endif
+
+
+void jsimd_rgb_ycc_convert_mmi(JDIMENSION image_width, JSAMPARRAY input_buf,
+ JSAMPIMAGE output_buf, JDIMENSION output_row,
+ int num_rows)
+{
+ JSAMPROW inptr, outptr0, outptr1, outptr2;
+ int num_cols, col;
+ __m64 re, ro, ge, go, be, bo, xe;
+#if RGB_PIXELSIZE == 4
+ __m64 xo;
+#endif
+ __m64 rgle, rghe, rglo, rgho, bgle, bghe, bglo, bgho;
+ __m64 ble, halfble, bhe, halfbhe, blo, halfblo, bho, halfbho;
+ __m64 rle, halfrle, rhe, halfrhe, rlo, halfrlo, rho, halfrho;
+ __m64 yle_rg, yhe_rg, yle_bg, yhe_bg, yle, yhe, ye;
+ __m64 ylo_rg, yho_rg, ylo_bg, yho_bg, ylo, yho, yo, y;
+ __m64 cble, cbhe, cbe, cblo, cbho, cbo, cb;
+ __m64 crle, crhe, cre, crlo, crho, cro, cr;
+
+ while (--num_rows >= 0) {
+ inptr = *input_buf++;
+ outptr0 = output_buf[0][output_row];
+ outptr1 = output_buf[1][output_row];
+ outptr2 = output_buf[2][output_row];
+ output_row++;
+
+ for (num_cols = image_width; num_cols > 0; num_cols -= 8,
+ outptr0 += 8, outptr1 += 8, outptr2 += 8) {
+
+#if RGB_PIXELSIZE == 3
+
+ if (num_cols < 8) {
+ col = num_cols * 3;
+ asm(".set noreorder\r\n"
+
+ "li $8, 1\r\n"
+ "move $9, %3\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 1f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 1\r\n"
+ "xor $12, $12, $12\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $9\r\n"
+ "lbu $12, 0($13)\r\n"
+
+ "1: \r\n"
+ "li $8, 2\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 2f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 2\r\n"
+ "xor $11, $11, $11\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $9\r\n"
+ "lhu $11, 0($13)\r\n"
+ "sll $12, $12, 16\r\n"
+ "or $12, $12, $11\r\n"
+
+ "2: \r\n"
+ "dmtc1 $12, %0\r\n"
+ "li $8, 4\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 3f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 4\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $9\r\n"
+ "lwu $14, 0($13)\r\n"
+ "dmtc1 $14, %1\r\n"
+ "dsll32 $12, $12, 0\r\n"
+ "or $12, $12, $14\r\n"
+ "dmtc1 $12, %0\r\n"
+
+ "3: \r\n"
+ "li $8, 8\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 4f\r\n"
+ "nop \r\n"
+ "mov.s %1, %0\r\n"
+ "ldc1 %0, 0(%5)\r\n"
+ "li $9, 8\r\n"
+ "j 5f\r\n"
+ "nop \r\n"
+
+ "4: \r\n"
+ "li $8, 16\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 5f\r\n"
+ "nop \r\n"
+ "mov.s %2, %0\r\n"
+ "ldc1 %0, 0(%5)\r\n"
+ "ldc1 %1, 8(%5)\r\n"
+
+ "5: \r\n"
+ "nop \r\n"
+ ".set reorder\r\n"
+
+ : "=f" (mmA), "=f" (mmG), "=f" (mmF)
+ : "r" (col), "r" (num_rows), "r" (inptr)
+ : "$f0", "$f2", "$f4", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "memory"
+ );
+ } else {
+ if (!(((long)inptr) & 7)) {
+ mmA = _mm_load_si64((__m64 *)&inptr[0]);
+ mmG = _mm_load_si64((__m64 *)&inptr[8]);
+ mmF = _mm_load_si64((__m64 *)&inptr[16]);
+ } else {
+ mmA = _mm_loadu_si64((__m64 *)&inptr[0]);
+ mmG = _mm_loadu_si64((__m64 *)&inptr[8]);
+ mmF = _mm_loadu_si64((__m64 *)&inptr[16]);
+ }
+ inptr += RGB_PIXELSIZE * 8;
+ }
+ mmD = _mm_srli_si64(mmA, 4 * BYTE_BIT);
+ mmA = _mm_slli_si64(mmA, 4 * BYTE_BIT);
+
+ mmA = _mm_unpackhi_pi8(mmA, mmG);
+ mmG = _mm_slli_si64(mmG, 4 * BYTE_BIT);
+
+ mmD = _mm_unpacklo_pi8(mmD, mmF);
+ mmG = _mm_unpackhi_pi8(mmG, mmF);
+
+ mmE = _mm_srli_si64(mmA, 4 * BYTE_BIT);
+ mmA = _mm_slli_si64(mmA, 4 * BYTE_BIT);
+
+ mmA = _mm_unpackhi_pi8(mmA, mmD);
+ mmD = _mm_slli_si64(mmD, 4 * BYTE_BIT);
+
+ mmE = _mm_unpacklo_pi8(mmE, mmG);
+ mmD = _mm_unpackhi_pi8(mmD, mmG);
+ mmC = _mm_loadhi_pi8_f(mmA);
+ mmA = _mm_loadlo_pi8_f(mmA);
+
+ mmB = _mm_loadhi_pi8_f(mmE);
+ mmE = _mm_loadlo_pi8_f(mmE);
+
+ mmF = _mm_loadhi_pi8_f(mmD);
+ mmD = _mm_loadlo_pi8_f(mmD);
+
+#else /* RGB_PIXELSIZE == 4 */
+
+ if (num_cols < 8) {
+ col = num_cols;
+ asm(".set noreorder\r\n"
+
+ "li $8, 1\r\n"
+ "move $9, %4\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 1f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 1\r\n"
+ PTR_SLL "$11, $9, 2\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $11\r\n"
+ "lwc1 %0, 0($13)\r\n"
+
+ "1: \r\n"
+ "li $8, 2\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 2f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 2\r\n"
+ PTR_SLL "$11, $9, 2\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $11\r\n"
+ "mov.s %1, %0\r\n"
+ "ldc1 %0, 0($13)\r\n"
+
+ "2: \r\n"
+ "li $8, 4\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 3f\r\n"
+ "nop \r\n"
+ "mov.s %2, %0\r\n"
+ "mov.s %3, %1\r\n"
+ "ldc1 %0, 0(%5)\r\n"
+ "ldc1 %1, 8(%5)\r\n"
+
+ "3: \r\n"
+ "nop \r\n"
+ ".set reorder\r\n"
+
+ : "=f" (mmA), "=f" (mmF), "=f" (mmD), "=f" (mmC)
+ : "r" (col), "r" (inptr)
+ : "$f0", "$f2", "$8", "$9", "$10", "$11", "$13", "memory"
+ );
+ } else {
+ if (!(((long)inptr) & 7)) {
+ mmA = _mm_load_si64((__m64 *)&inptr[0]);
+ mmF = _mm_load_si64((__m64 *)&inptr[8]);
+ mmD = _mm_load_si64((__m64 *)&inptr[16]);
+ mmC = _mm_load_si64((__m64 *)&inptr[24]);
+ } else {
+ mmA = _mm_loadu_si64((__m64 *)&inptr[0]);
+ mmF = _mm_loadu_si64((__m64 *)&inptr[8]);
+ mmD = _mm_loadu_si64((__m64 *)&inptr[16]);
+ mmC = _mm_loadu_si64((__m64 *)&inptr[24]);
+ }
+ inptr += RGB_PIXELSIZE * 8;
+ }
+ mmB = _mm_unpackhi_pi8(mmA, mmF);
+ mmA = _mm_unpacklo_pi8(mmA, mmF);
+
+ mmG = _mm_unpackhi_pi8(mmD, mmC);
+ mmD = _mm_unpacklo_pi8(mmD, mmC);
+
+ mmE = _mm_unpackhi_pi16(mmA, mmD);
+ mmA = _mm_unpacklo_pi16(mmA, mmD);
+
+ mmH = _mm_unpackhi_pi16(mmB, mmG);
+ mmB = _mm_unpacklo_pi16(mmB, mmG);
+
+ mmC = _mm_loadhi_pi8_f(mmA);
+ mmA = _mm_loadlo_pi8_f(mmA);
+
+ mmD = _mm_loadhi_pi8_f(mmB);
+ mmB = _mm_loadlo_pi8_f(mmB);
+
+ mmG = _mm_loadhi_pi8_f(mmE);
+ mmE = _mm_loadlo_pi8_f(mmE);
+
+ mmF = _mm_unpacklo_pi8(mmH, mmH);
+ mmH = _mm_unpackhi_pi8(mmH, mmH);
+ mmF = _mm_srli_pi16(mmF, BYTE_BIT);
+ mmH = _mm_srli_pi16(mmH, BYTE_BIT);
+
+#endif
+
+ /* re=(R0 R2 R4 R6), ge=(G0 G2 G4 G6), be=(B0 B2 B4 B6)
+ * ro=(R1 R3 R5 R7), go=(G1 G3 G5 G7), bo=(B1 B3 B5 B7)
+ *
+ * (Original)
+ * Y = 0.29900 * R + 0.58700 * G + 0.11400 * B
+ * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
+ * Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
+ *
+ * (This implementation)
+ * Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
+ * Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
+ * Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
+ */
+
+ rglo = _mm_unpacklo_pi16(ro, go);
+ rgho = _mm_unpackhi_pi16(ro, go);
+ ylo_rg = _mm_madd_pi16(rglo, PW_F0299_F0337);
+ yho_rg = _mm_madd_pi16(rgho, PW_F0299_F0337);
+ cblo = _mm_madd_pi16(rglo, PW_MF016_MF033);
+ cbho = _mm_madd_pi16(rgho, PW_MF016_MF033);
+
+ blo = _mm_loadlo_pi16_f(bo);
+ bho = _mm_loadhi_pi16_f(bo);
+ halfblo = _mm_srli_pi32(blo, 1);
+ halfbho = _mm_srli_pi32(bho, 1);
+
+ cblo = _mm_add_pi32(cblo, halfblo);
+ cbho = _mm_add_pi32(cbho, halfbho);
+ cblo = _mm_add_pi32(cblo, PD_ONEHALFM1_CJ);
+ cbho = _mm_add_pi32(cbho, PD_ONEHALFM1_CJ);
+ cblo = _mm_srli_pi32(cblo, SCALEBITS);
+ cbho = _mm_srli_pi32(cbho, SCALEBITS);
+ cbo = _mm_packs_pi32(cblo, cbho);
+
+ rgle = _mm_unpacklo_pi16(re, ge);
+ rghe = _mm_unpackhi_pi16(re, ge);
+ yle_rg = _mm_madd_pi16(rgle, PW_F0299_F0337);
+ yhe_rg = _mm_madd_pi16(rghe, PW_F0299_F0337);
+ cble = _mm_madd_pi16(rgle, PW_MF016_MF033);
+ cbhe = _mm_madd_pi16(rghe, PW_MF016_MF033);
+
+ ble = _mm_loadlo_pi16_f(be);
+ bhe = _mm_loadhi_pi16_f(be);
+ halfble = _mm_srli_pi32(ble, 1);
+ halfbhe = _mm_srli_pi32(bhe, 1);
+
+ cble = _mm_add_pi32(cble, halfble);
+ cbhe = _mm_add_pi32(cbhe, halfbhe);
+ cble = _mm_add_pi32(cble, PD_ONEHALFM1_CJ);
+ cbhe = _mm_add_pi32(cbhe, PD_ONEHALFM1_CJ);
+ cble = _mm_srli_pi32(cble, SCALEBITS);
+ cbhe = _mm_srli_pi32(cbhe, SCALEBITS);
+ cbe = _mm_packs_pi32(cble, cbhe);
+
+ cbo = _mm_slli_pi16(cbo, BYTE_BIT);
+ cb = _mm_or_si64(cbe, cbo);
+
+ bglo = _mm_unpacklo_pi16(bo, go);
+ bgho = _mm_unpackhi_pi16(bo, go);
+ ylo_bg = _mm_madd_pi16(bglo, PW_F0114_F0250);
+ yho_bg = _mm_madd_pi16(bgho, PW_F0114_F0250);
+ crlo = _mm_madd_pi16(bglo, PW_MF008_MF041);
+ crho = _mm_madd_pi16(bgho, PW_MF008_MF041);
+
+ ylo = _mm_add_pi32(ylo_bg, ylo_rg);
+ yho = _mm_add_pi32(yho_bg, yho_rg);
+ ylo = _mm_add_pi32(ylo, PD_ONEHALF);
+ yho = _mm_add_pi32(yho, PD_ONEHALF);
+ ylo = _mm_srli_pi32(ylo, SCALEBITS);
+ yho = _mm_srli_pi32(yho, SCALEBITS);
+ yo = _mm_packs_pi32(ylo, yho);
+
+ rlo = _mm_loadlo_pi16_f(ro);
+ rho = _mm_loadhi_pi16_f(ro);
+ halfrlo = _mm_srli_pi32(rlo, 1);
+ halfrho = _mm_srli_pi32(rho, 1);
+
+ crlo = _mm_add_pi32(crlo, halfrlo);
+ crho = _mm_add_pi32(crho, halfrho);
+ crlo = _mm_add_pi32(crlo, PD_ONEHALFM1_CJ);
+ crho = _mm_add_pi32(crho, PD_ONEHALFM1_CJ);
+ crlo = _mm_srli_pi32(crlo, SCALEBITS);
+ crho = _mm_srli_pi32(crho, SCALEBITS);
+ cro = _mm_packs_pi32(crlo, crho);
+
+ bgle = _mm_unpacklo_pi16(be, ge);
+ bghe = _mm_unpackhi_pi16(be, ge);
+ yle_bg = _mm_madd_pi16(bgle, PW_F0114_F0250);
+ yhe_bg = _mm_madd_pi16(bghe, PW_F0114_F0250);
+ crle = _mm_madd_pi16(bgle, PW_MF008_MF041);
+ crhe = _mm_madd_pi16(bghe, PW_MF008_MF041);
+
+ yle = _mm_add_pi32(yle_bg, yle_rg);
+ yhe = _mm_add_pi32(yhe_bg, yhe_rg);
+ yle = _mm_add_pi32(yle, PD_ONEHALF);
+ yhe = _mm_add_pi32(yhe, PD_ONEHALF);
+ yle = _mm_srli_pi32(yle, SCALEBITS);
+ yhe = _mm_srli_pi32(yhe, SCALEBITS);
+ ye = _mm_packs_pi32(yle, yhe);
+
+ yo = _mm_slli_pi16(yo, BYTE_BIT);
+ y = _mm_or_si64(ye, yo);
+
+ rle = _mm_loadlo_pi16_f(re);
+ rhe = _mm_loadhi_pi16_f(re);
+ halfrle = _mm_srli_pi32(rle, 1);
+ halfrhe = _mm_srli_pi32(rhe, 1);
+
+ crle = _mm_add_pi32(crle, halfrle);
+ crhe = _mm_add_pi32(crhe, halfrhe);
+ crle = _mm_add_pi32(crle, PD_ONEHALFM1_CJ);
+ crhe = _mm_add_pi32(crhe, PD_ONEHALFM1_CJ);
+ crle = _mm_srli_pi32(crle, SCALEBITS);
+ crhe = _mm_srli_pi32(crhe, SCALEBITS);
+ cre = _mm_packs_pi32(crle, crhe);
+
+ cro = _mm_slli_pi16(cro, BYTE_BIT);
+ cr = _mm_or_si64(cre, cro);
+
+ _mm_store_si64((__m64 *)&outptr0[0], y);
+ _mm_store_si64((__m64 *)&outptr1[0], cb);
+ _mm_store_si64((__m64 *)&outptr2[0], cr);
+ }
+ }
+}
+
+#undef mmA
+#undef mmB
+#undef mmC
+#undef mmD
+#undef mmE
+#undef mmF
+#undef mmG
+#undef mmH
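
For reference, the fixed-point math that jsimd_rgb_ycc_convert_mmi() vectorizes can be written as a scalar routine. The sketch below is not part of the patch; it assumes SCALEBITS == 16 and CENTERJSAMPLE == 128, as elsewhere in libjpeg-turbo. It also shows why the comment block above splits the 0.587 coefficient: FIX(0.58700) = 38470 does not fit in a signed 16-bit word, so the SIMD code multiplies by FIX(0.33700) and FIX(0.25000) instead, and forms the 0.5 * B and 0.5 * R terms with shifts rather than multiplies.

/* Scalar sketch (not part of the patch) of the conversion above.
 * Assumes SCALEBITS == 16 and CENTERJSAMPLE == 128. */
#define SCALEBITS      16
#define CENTERJSAMPLE  128
#define FIX(x)         ((int)((x) * (1 << SCALEBITS) + 0.5))

static void rgb_ycc_pixel(int r, int g, int b, unsigned char *y,
                          unsigned char *cb, unsigned char *cr)
{
  int half = 1 << (SCALEBITS - 1);                       /* PD_ONEHALF */
  int half_cj = half - 1 + (CENTERJSAMPLE << SCALEBITS); /* PD_ONEHALFM1_CJ */

  *y  = (unsigned char)((FIX(0.29900) * r + FIX(0.33700) * g +
                         FIX(0.11400) * b + FIX(0.25000) * g +
                         half) >> SCALEBITS);
  *cb = (unsigned char)((-FIX(0.16874) * r - FIX(0.33126) * g +
                         (b << (SCALEBITS - 1)) +        /* 0.5 * B via shift */
                         half_cj) >> SCALEBITS);
  *cr = (unsigned char)(((r << (SCALEBITS - 1)) -        /* 0.5 * R via shift */
                         FIX(0.41869) * g - FIX(0.08131) * b +
                         half_cj) >> SCALEBITS);
}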
diff --git a/media/libjpeg/simd/mips64/jccolor-mmi.c b/media/libjpeg/simd/mips64/jccolor-mmi.c
new file mode 100644
index 0000000000..93ef5c79f7
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jccolor-mmi.c
@@ -0,0 +1,148 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2011, 2014, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* RGB --> YCC CONVERSION */
+
+#include "jsimd_mmi.h"
+
+
+#define F_0_081 ((short)5329) /* FIX(0.08131) */
+#define F_0_114 ((short)7471) /* FIX(0.11400) */
+#define F_0_168 ((short)11059) /* FIX(0.16874) */
+#define F_0_250 ((short)16384) /* FIX(0.25000) */
+#define F_0_299 ((short)19595) /* FIX(0.29900) */
+#define F_0_331 ((short)21709) /* FIX(0.33126) */
+#define F_0_418 ((short)27439) /* FIX(0.41869) */
+#define F_0_587 ((short)38470) /* FIX(0.58700) */
+#define F_0_337 ((short)(F_0_587 - F_0_250)) /* FIX(0.58700) - FIX(0.25000) */
+
+enum const_index {
+ index_PD_ONEHALF,
+ index_PW_F0299_F0337,
+ index_PW_F0114_F0250,
+ index_PW_MF016_MF033,
+ index_PW_MF008_MF041,
+ index_PD_ONEHALFM1_CJ
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi32((int)(1 << (SCALEBITS - 1)), (int)(1 << (SCALEBITS - 1))),
+ _uint64_set_pi16(F_0_337, F_0_299, F_0_337, F_0_299),
+ _uint64_set_pi16(F_0_250, F_0_114, F_0_250, F_0_114),
+ _uint64_set_pi16(-F_0_331, -F_0_168, -F_0_331, -F_0_168),
+ _uint64_set_pi16(-F_0_418, -F_0_081, -F_0_418, -F_0_081),
+ _uint64_set_pi32(((1 << (SCALEBITS - 1)) - 1 + (CENTERJSAMPLE << SCALEBITS)),
+ ((1 << (SCALEBITS - 1)) - 1 + (CENTERJSAMPLE << SCALEBITS)))
+};
+
+#define get_const_value(index) (*(__m64 *)&const_value[index])
+
+#define PD_ONEHALF get_const_value(index_PD_ONEHALF)
+#define PW_F0299_F0337 get_const_value(index_PW_F0299_F0337)
+#define PW_F0114_F0250 get_const_value(index_PW_F0114_F0250)
+#define PW_MF016_MF033 get_const_value(index_PW_MF016_MF033)
+#define PW_MF008_MF041 get_const_value(index_PW_MF008_MF041)
+#define PD_ONEHALFM1_CJ get_const_value(index_PD_ONEHALFM1_CJ)
+
+
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+
+#define RGB_RED EXT_RGB_RED
+#define RGB_GREEN EXT_RGB_GREEN
+#define RGB_BLUE EXT_RGB_BLUE
+#define RGB_PIXELSIZE EXT_RGB_PIXELSIZE
+#define jsimd_rgb_ycc_convert_mmi jsimd_extrgb_ycc_convert_mmi
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_ycc_convert_mmi
+
+#define RGB_RED EXT_RGBX_RED
+#define RGB_GREEN EXT_RGBX_GREEN
+#define RGB_BLUE EXT_RGBX_BLUE
+#define RGB_PIXELSIZE EXT_RGBX_PIXELSIZE
+#define jsimd_rgb_ycc_convert_mmi jsimd_extrgbx_ycc_convert_mmi
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_ycc_convert_mmi
+
+#define RGB_RED EXT_BGR_RED
+#define RGB_GREEN EXT_BGR_GREEN
+#define RGB_BLUE EXT_BGR_BLUE
+#define RGB_PIXELSIZE EXT_BGR_PIXELSIZE
+#define jsimd_rgb_ycc_convert_mmi jsimd_extbgr_ycc_convert_mmi
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_ycc_convert_mmi
+
+#define RGB_RED EXT_BGRX_RED
+#define RGB_GREEN EXT_BGRX_GREEN
+#define RGB_BLUE EXT_BGRX_BLUE
+#define RGB_PIXELSIZE EXT_BGRX_PIXELSIZE
+#define jsimd_rgb_ycc_convert_mmi jsimd_extbgrx_ycc_convert_mmi
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_ycc_convert_mmi
+
+#define RGB_RED EXT_XBGR_RED
+#define RGB_GREEN EXT_XBGR_GREEN
+#define RGB_BLUE EXT_XBGR_BLUE
+#define RGB_PIXELSIZE EXT_XBGR_PIXELSIZE
+#define jsimd_rgb_ycc_convert_mmi jsimd_extxbgr_ycc_convert_mmi
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_ycc_convert_mmi
+
+#define RGB_RED EXT_XRGB_RED
+#define RGB_GREEN EXT_XRGB_GREEN
+#define RGB_BLUE EXT_XRGB_BLUE
+#define RGB_PIXELSIZE EXT_XRGB_PIXELSIZE
+#define jsimd_rgb_ycc_convert_mmi jsimd_extxrgb_ycc_convert_mmi
+#include "jccolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_ycc_convert_mmi
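
The block of #define / #include / #undef stanzas above is how libjpeg-turbo stamps out one conversion routine per pixel layout: jccolext-mmi.c is written once against the generic RGB_RED/RGB_GREEN/RGB_BLUE/RGB_PIXELSIZE macros and the generic function name, and each stanza rebinds those macros before re-including it. A minimal, self-contained sketch of the same pattern (file and function names here are made up for illustration):

/* chan.inc -- hypothetical template, compiled once per inclusion */
int CHAN_FN(const int *pixel)
{
  return pixel[CHAN_INDEX];
}

/* main.c -- instantiate the template for two pixel layouts */
#define CHAN_INDEX 0
#define CHAN_FN    red_of_rgb
#include "chan.inc"
#undef CHAN_INDEX
#undef CHAN_FN

#define CHAN_INDEX 2
#define CHAN_FN    red_of_bgr
#include "chan.inc"
#undef CHAN_INDEX
#undef CHAN_FN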
diff --git a/media/libjpeg/simd/mips64/jcgray-mmi.c b/media/libjpeg/simd/mips64/jcgray-mmi.c
new file mode 100644
index 0000000000..9c7b833f2e
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jcgray-mmi.c
@@ -0,0 +1,132 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2011, 2014, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhangLixia <zhanglixia-hf@loongson.cn>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* RGB --> GRAYSCALE CONVERSION */
+
+#include "jsimd_mmi.h"
+
+
+#define F_0_114 ((short)7471) /* FIX(0.11400) */
+#define F_0_250 ((short)16384) /* FIX(0.25000) */
+#define F_0_299 ((short)19595) /* FIX(0.29900) */
+#define F_0_587 ((short)38470) /* FIX(0.58700) */
+#define F_0_337 ((short)(F_0_587 - F_0_250)) /* FIX(0.58700) - FIX(0.25000) */
+
+enum const_index {
+ index_PD_ONEHALF,
+ index_PW_F0299_F0337,
+ index_PW_F0114_F0250
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi32((int)(1 << (SCALEBITS - 1)), (int)(1 << (SCALEBITS - 1))),
+ _uint64_set_pi16(F_0_337, F_0_299, F_0_337, F_0_299),
+ _uint64_set_pi16(F_0_250, F_0_114, F_0_250, F_0_114)
+};
+
+#define get_const_value(index) (*(__m64 *)&const_value[index])
+
+#define PD_ONEHALF get_const_value(index_PD_ONEHALF)
+#define PW_F0299_F0337 get_const_value(index_PW_F0299_F0337)
+#define PW_F0114_F0250 get_const_value(index_PW_F0114_F0250)
+
+
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+
+#define RGB_RED EXT_RGB_RED
+#define RGB_GREEN EXT_RGB_GREEN
+#define RGB_BLUE EXT_RGB_BLUE
+#define RGB_PIXELSIZE EXT_RGB_PIXELSIZE
+#define jsimd_rgb_gray_convert_mmi jsimd_extrgb_gray_convert_mmi
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_gray_convert_mmi
+
+#define RGB_RED EXT_RGBX_RED
+#define RGB_GREEN EXT_RGBX_GREEN
+#define RGB_BLUE EXT_RGBX_BLUE
+#define RGB_PIXELSIZE EXT_RGBX_PIXELSIZE
+#define jsimd_rgb_gray_convert_mmi jsimd_extrgbx_gray_convert_mmi
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_gray_convert_mmi
+
+#define RGB_RED EXT_BGR_RED
+#define RGB_GREEN EXT_BGR_GREEN
+#define RGB_BLUE EXT_BGR_BLUE
+#define RGB_PIXELSIZE EXT_BGR_PIXELSIZE
+#define jsimd_rgb_gray_convert_mmi jsimd_extbgr_gray_convert_mmi
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_gray_convert_mmi
+
+#define RGB_RED EXT_BGRX_RED
+#define RGB_GREEN EXT_BGRX_GREEN
+#define RGB_BLUE EXT_BGRX_BLUE
+#define RGB_PIXELSIZE EXT_BGRX_PIXELSIZE
+#define jsimd_rgb_gray_convert_mmi jsimd_extbgrx_gray_convert_mmi
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_gray_convert_mmi
+
+#define RGB_RED EXT_XBGR_RED
+#define RGB_GREEN EXT_XBGR_GREEN
+#define RGB_BLUE EXT_XBGR_BLUE
+#define RGB_PIXELSIZE EXT_XBGR_PIXELSIZE
+#define jsimd_rgb_gray_convert_mmi jsimd_extxbgr_gray_convert_mmi
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_gray_convert_mmi
+
+#define RGB_RED EXT_XRGB_RED
+#define RGB_GREEN EXT_XRGB_GREEN
+#define RGB_BLUE EXT_XRGB_BLUE
+#define RGB_PIXELSIZE EXT_XRGB_PIXELSIZE
+#define jsimd_rgb_gray_convert_mmi jsimd_extxrgb_gray_convert_mmi
+#include "jcgryext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_rgb_gray_convert_mmi
diff --git a/media/libjpeg/simd/mips64/jcgryext-mmi.c b/media/libjpeg/simd/mips64/jcgryext-mmi.c
new file mode 100644
index 0000000000..08a83d6699
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jcgryext-mmi.c
@@ -0,0 +1,374 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+ * Copyright (C) 2014-2015, 2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhangLixia <zhanglixia-hf@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* This file is included by jcgray-mmi.c */
+
+
+#if RGB_RED == 0
+#define mmA re
+#define mmB ro
+#elif RGB_GREEN == 0
+#define mmA ge
+#define mmB go
+#elif RGB_BLUE == 0
+#define mmA be
+#define mmB bo
+#else
+#define mmA xe
+#define mmB xo
+#endif
+
+#if RGB_RED == 1
+#define mmC re
+#define mmD ro
+#elif RGB_GREEN == 1
+#define mmC ge
+#define mmD go
+#elif RGB_BLUE == 1
+#define mmC be
+#define mmD bo
+#else
+#define mmC xe
+#define mmD xo
+#endif
+
+#if RGB_RED == 2
+#define mmE re
+#define mmF ro
+#elif RGB_GREEN == 2
+#define mmE ge
+#define mmF go
+#elif RGB_BLUE == 2
+#define mmE be
+#define mmF bo
+#else
+#define mmE xe
+#define mmF xo
+#endif
+
+#if RGB_RED == 3
+#define mmG re
+#define mmH ro
+#elif RGB_GREEN == 3
+#define mmG ge
+#define mmH go
+#elif RGB_BLUE == 3
+#define mmG be
+#define mmH bo
+#else
+#define mmG xe
+#define mmH xo
+#endif
+
+
+void jsimd_rgb_gray_convert_mmi(JDIMENSION image_width, JSAMPARRAY input_buf,
+ JSAMPIMAGE output_buf, JDIMENSION output_row,
+ int num_rows)
+{
+ JSAMPROW inptr, outptr;
+ int num_cols, col;
+ __m64 re, ro, ge, go, be, bo, xe;
+#if RGB_PIXELSIZE == 4
+ __m64 xo;
+#endif
+ __m64 rgle, rghe, rglo, rgho, bgle, bghe, bglo, bgho;
+ __m64 yle_rg, yhe_rg, yle_bg, yhe_bg, yle, yhe, ye;
+ __m64 ylo_rg, yho_rg, ylo_bg, yho_bg, ylo, yho, yo, y;
+
+ while (--num_rows >= 0) {
+ inptr = *input_buf++;
+ outptr = output_buf[0][output_row];
+ output_row++;
+
+ for (num_cols = image_width; num_cols > 0; num_cols -= 8,
+ outptr += 8) {
+
+#if RGB_PIXELSIZE == 3
+
+ if (num_cols < 8) {
+ col = num_cols * 3;
+ asm(".set noreorder\r\n"
+
+ "li $8, 1\r\n"
+ "move $9, %3\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 1f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 1\r\n"
+ "xor $12, $12, $12\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $9\r\n"
+ "lbu $12, 0($13)\r\n"
+
+ "1: \r\n"
+ "li $8, 2\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 2f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 2\r\n"
+ "xor $11, $11, $11\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $9\r\n"
+ "lhu $11, 0($13)\r\n"
+ "sll $12, $12, 16\r\n"
+ "or $12, $12, $11\r\n"
+
+ "2: \r\n"
+ "dmtc1 $12, %0\r\n"
+ "li $8, 4\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 3f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 4\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $9\r\n"
+ "lwu $14, 0($13)\r\n"
+ "dmtc1 $14, %1\r\n"
+ "dsll32 $12, $12, 0\r\n"
+ "or $12, $12, $14\r\n"
+ "dmtc1 $12, %0\r\n"
+
+ "3: \r\n"
+ "li $8, 8\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 4f\r\n"
+ "nop \r\n"
+ "mov.s %1, %0\r\n"
+ "ldc1 %0, 0(%5)\r\n"
+ "li $9, 8\r\n"
+ "j 5f\r\n"
+ "nop \r\n"
+
+ "4: \r\n"
+ "li $8, 16\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 5f\r\n"
+ "nop \r\n"
+ "mov.s %2, %0\r\n"
+ "ldc1 %0, 0(%5)\r\n"
+ "ldc1 %1, 8(%5)\r\n"
+
+ "5: \r\n"
+ "nop \r\n"
+ ".set reorder\r\n"
+
+ : "=f" (mmA), "=f" (mmG), "=f" (mmF)
+ : "r" (col), "r" (num_rows), "r" (inptr)
+ : "$f0", "$f2", "$f4", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "memory"
+ );
+ } else {
+ if (!(((long)inptr) & 7)) {
+ mmA = _mm_load_si64((__m64 *)&inptr[0]);
+ mmG = _mm_load_si64((__m64 *)&inptr[8]);
+ mmF = _mm_load_si64((__m64 *)&inptr[16]);
+ } else {
+ mmA = _mm_loadu_si64((__m64 *)&inptr[0]);
+ mmG = _mm_loadu_si64((__m64 *)&inptr[8]);
+ mmF = _mm_loadu_si64((__m64 *)&inptr[16]);
+ }
+ inptr += RGB_PIXELSIZE * 8;
+ }
+ mmD = _mm_srli_si64(mmA, 4 * BYTE_BIT);
+ mmA = _mm_slli_si64(mmA, 4 * BYTE_BIT);
+
+ mmA = _mm_unpackhi_pi8(mmA, mmG);
+ mmG = _mm_slli_si64(mmG, 4 * BYTE_BIT);
+
+ mmD = _mm_unpacklo_pi8(mmD, mmF);
+ mmG = _mm_unpackhi_pi8(mmG, mmF);
+
+ mmE = _mm_srli_si64(mmA, 4 * BYTE_BIT);
+ mmA = _mm_slli_si64(mmA, 4 * BYTE_BIT);
+
+ mmA = _mm_unpackhi_pi8(mmA, mmD);
+ mmD = _mm_slli_si64(mmD, 4 * BYTE_BIT);
+
+ mmE = _mm_unpacklo_pi8(mmE, mmG);
+ mmD = _mm_unpackhi_pi8(mmD, mmG);
+ mmC = _mm_loadhi_pi8_f(mmA);
+ mmA = _mm_loadlo_pi8_f(mmA);
+
+ mmB = _mm_loadhi_pi8_f(mmE);
+ mmE = _mm_loadlo_pi8_f(mmE);
+
+ mmF = _mm_loadhi_pi8_f(mmD);
+ mmD = _mm_loadlo_pi8_f(mmD);
+
+#else /* RGB_PIXELSIZE == 4 */
+
+ if (num_cols < 8) {
+ col = num_cols;
+ asm(".set noreorder\r\n"
+
+ "li $8, 1\r\n"
+ "move $9, %4\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 1f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 1\r\n"
+ PTR_SLL "$11, $9, 2\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $11\r\n"
+ "lwc1 %0, 0($13)\r\n"
+
+ "1: \r\n"
+ "li $8, 2\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 2f\r\n"
+ "nop \r\n"
+ "subu $9, $9, 2\r\n"
+ PTR_SLL "$11, $9, 2\r\n"
+ "move $13, %5\r\n"
+ PTR_ADDU "$13, $13, $11\r\n"
+ "mov.s %1, %0\r\n"
+ "ldc1 %0, 0($13)\r\n"
+
+ "2: \r\n"
+ "li $8, 4\r\n"
+ "and $10, $9, $8\r\n"
+ "beqz $10, 3f\r\n"
+ "nop \r\n"
+ "mov.s %2, %0\r\n"
+ "mov.s %3, %1\r\n"
+ "ldc1 %0, 0(%5)\r\n"
+ "ldc1 %1, 8(%5)\r\n"
+
+ "3: \r\n"
+ "nop \r\n"
+ ".set reorder\r\n"
+
+ : "=f" (mmA), "=f" (mmF), "=f" (mmD), "=f" (mmC)
+ : "r" (col), "r" (inptr)
+ : "$f0", "$f2", "$8", "$9", "$10", "$11", "$13", "memory"
+ );
+ } else {
+ if (!(((long)inptr) & 7)) {
+ mmA = _mm_load_si64((__m64 *)&inptr[0]);
+ mmF = _mm_load_si64((__m64 *)&inptr[8]);
+ mmD = _mm_load_si64((__m64 *)&inptr[16]);
+ mmC = _mm_load_si64((__m64 *)&inptr[24]);
+ } else {
+ mmA = _mm_loadu_si64((__m64 *)&inptr[0]);
+ mmF = _mm_loadu_si64((__m64 *)&inptr[8]);
+ mmD = _mm_loadu_si64((__m64 *)&inptr[16]);
+ mmC = _mm_loadu_si64((__m64 *)&inptr[24]);
+ }
+ inptr += RGB_PIXELSIZE * 8;
+ }
+ mmB = _mm_unpackhi_pi8(mmA, mmF);
+ mmA = _mm_unpacklo_pi8(mmA, mmF);
+
+ mmG = _mm_unpackhi_pi8(mmD, mmC);
+ mmD = _mm_unpacklo_pi8(mmD, mmC);
+
+ mmE = _mm_unpackhi_pi16(mmA, mmD);
+ mmA = _mm_unpacklo_pi16(mmA, mmD);
+
+ mmH = _mm_unpackhi_pi16(mmB, mmG);
+ mmB = _mm_unpacklo_pi16(mmB, mmG);
+
+ mmC = _mm_loadhi_pi8_f(mmA);
+ mmA = _mm_loadlo_pi8_f(mmA);
+
+ mmD = _mm_loadhi_pi8_f(mmB);
+ mmB = _mm_loadlo_pi8_f(mmB);
+
+ mmG = _mm_loadhi_pi8_f(mmE);
+ mmE = _mm_loadlo_pi8_f(mmE);
+
+ mmF = _mm_unpacklo_pi8(mmH, mmH);
+ mmH = _mm_unpackhi_pi8(mmH, mmH);
+ mmF = _mm_srli_pi16(mmF, BYTE_BIT);
+ mmH = _mm_srli_pi16(mmH, BYTE_BIT);
+
+#endif
+
+ /* re=(R0 R2 R4 R6), ge=(G0 G2 G4 G6), be=(B0 B2 B4 B6)
+ * ro=(R1 R3 R5 R7), go=(G1 G3 G5 G7), bo=(B1 B3 B5 B7)
+ *
+ * (Original)
+ * Y = 0.29900 * R + 0.58700 * G + 0.11400 * B
+ *
+ * (This implementation)
+ * Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
+ */
+
+ rglo = _mm_unpacklo_pi16(ro, go);
+ rgho = _mm_unpackhi_pi16(ro, go);
+ ylo_rg = _mm_madd_pi16(rglo, PW_F0299_F0337);
+ yho_rg = _mm_madd_pi16(rgho, PW_F0299_F0337);
+
+ rgle = _mm_unpacklo_pi16(re, ge);
+ rghe = _mm_unpackhi_pi16(re, ge);
+ yle_rg = _mm_madd_pi16(rgle, PW_F0299_F0337);
+ yhe_rg = _mm_madd_pi16(rghe, PW_F0299_F0337);
+
+ bglo = _mm_unpacklo_pi16(bo, go);
+ bgho = _mm_unpackhi_pi16(bo, go);
+ ylo_bg = _mm_madd_pi16(bglo, PW_F0114_F0250);
+ yho_bg = _mm_madd_pi16(bgho, PW_F0114_F0250);
+
+ ylo = _mm_add_pi32(ylo_bg, ylo_rg);
+ yho = _mm_add_pi32(yho_bg, yho_rg);
+ ylo = _mm_add_pi32(ylo, PD_ONEHALF);
+ yho = _mm_add_pi32(yho, PD_ONEHALF);
+ ylo = _mm_srli_pi32(ylo, SCALEBITS);
+ yho = _mm_srli_pi32(yho, SCALEBITS);
+ yo = _mm_packs_pi32(ylo, yho);
+
+ bgle = _mm_unpacklo_pi16(be, ge);
+ bghe = _mm_unpackhi_pi16(be, ge);
+ yle_bg = _mm_madd_pi16(bgle, PW_F0114_F0250);
+ yhe_bg = _mm_madd_pi16(bghe, PW_F0114_F0250);
+
+ yle = _mm_add_pi32(yle_bg, yle_rg);
+ yhe = _mm_add_pi32(yhe_bg, yhe_rg);
+ yle = _mm_add_pi32(yle, PD_ONEHALF);
+ yhe = _mm_add_pi32(yhe, PD_ONEHALF);
+ yle = _mm_srli_pi32(yle, SCALEBITS);
+ yhe = _mm_srli_pi32(yhe, SCALEBITS);
+ ye = _mm_packs_pi32(yle, yhe);
+
+ yo = _mm_slli_pi16(yo, BYTE_BIT);
+ y = _mm_or_si64(ye, yo);
+
+ _mm_store_si64((__m64 *)&outptr[0], y);
+ }
+ }
+}
+
+#undef mmA
+#undef mmB
+#undef mmC
+#undef mmD
+#undef mmE
+#undef mmF
+#undef mmG
+#undef mmH
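
One detail worth noting in both conversion kernels: even and odd pixels travel through the arithmetic in separate registers (ye holds Y0 Y2 Y4 Y6, yo holds Y1 Y3 Y5 Y7, one 8-bit result per 16-bit lane), and the final _mm_slli_pi16 / _mm_or_si64 pair interleaves them back into byte order. A scalar sketch of that merge; it relies on the high byte of every 16-bit lane being zero, which holds because each lane carries a sample in the range 0..255:

#include <stdint.h>

/* Sketch of the ye/yo merge: yo is shifted into the high byte of each
 * 16-bit lane and OR-ed with ye.  Because every lane's high byte is
 * zero on entry, a plain 64-bit shift behaves like the per-lane
 * _mm_slli_pi16(yo, BYTE_BIT). */
static uint64_t merge_even_odd(uint64_t ye, uint64_t yo)
{
  return ye | (yo << 8);   /* bytes (Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7) */
}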
diff --git a/media/libjpeg/simd/mips64/jcsample-mmi.c b/media/libjpeg/simd/mips64/jcsample-mmi.c
new file mode 100644
index 0000000000..0354dac087
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jcsample-mmi.c
@@ -0,0 +1,98 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2015, 2018-2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* CHROMA DOWNSAMPLING */
+
+#include "jsimd_mmi.h"
+#include "jcsample.h"
+
+
+void jsimd_h2v2_downsample_mmi(JDIMENSION image_width, int max_v_samp_factor,
+ JDIMENSION v_samp_factor,
+ JDIMENSION width_in_blocks,
+ JSAMPARRAY input_data, JSAMPARRAY output_data)
+{
+ int inrow, outrow, outcol;
+ JDIMENSION output_cols = width_in_blocks * DCTSIZE;
+ JSAMPROW inptr0, inptr1, outptr;
+ __m64 bias, mask = 0.0, thisavg, nextavg, avg;
+ __m64 this0o, this0e, this0, this0sum, next0o, next0e, next0, next0sum;
+ __m64 this1o, this1e, this1, this1sum, next1o, next1e, next1, next1sum;
+
+ expand_right_edge(input_data, max_v_samp_factor, image_width,
+ output_cols * 2);
+
+ bias = _mm_set1_pi32((1 << 17) + 1); /* 0x00020001 (32-bit bias pattern) */
+ /* bias={1, 2, 1, 2} (16-bit) */
+ mask = _mm_cmpeq_pi16(mask, mask);
+ mask = _mm_srli_pi16(mask, BYTE_BIT); /* {0xFF 0x00 0xFF 0x00 ..} */
+
+ for (inrow = 0, outrow = 0; outrow < v_samp_factor;
+ inrow += 2, outrow++) {
+
+ inptr0 = input_data[inrow];
+ inptr1 = input_data[inrow + 1];
+ outptr = output_data[outrow];
+
+ for (outcol = output_cols; outcol > 0;
+ outcol -= 8, inptr0 += 16, inptr1 += 16, outptr += 8) {
+
+ this0 = _mm_load_si64((__m64 *)&inptr0[0]);
+ this1 = _mm_load_si64((__m64 *)&inptr1[0]);
+ next0 = _mm_load_si64((__m64 *)&inptr0[8]);
+ next1 = _mm_load_si64((__m64 *)&inptr1[8]);
+
+ this0o = _mm_and_si64(this0, mask);
+ this0e = _mm_srli_pi16(this0, BYTE_BIT);
+ this1o = _mm_and_si64(this1, mask);
+ this1e = _mm_srli_pi16(this1, BYTE_BIT);
+ this0sum = _mm_add_pi16(this0o, this0e);
+ this1sum = _mm_add_pi16(this1o, this1e);
+
+ next0o = _mm_and_si64(next0, mask);
+ next0e = _mm_srli_pi16(next0, BYTE_BIT);
+ next1o = _mm_and_si64(next1, mask);
+ next1e = _mm_srli_pi16(next1, BYTE_BIT);
+ next0sum = _mm_add_pi16(next0o, next0e);
+ next1sum = _mm_add_pi16(next1o, next1e);
+
+ thisavg = _mm_add_pi16(this0sum, this1sum);
+ nextavg = _mm_add_pi16(next0sum, next1sum);
+ thisavg = _mm_add_pi16(thisavg, bias);
+ nextavg = _mm_add_pi16(nextavg, bias);
+ thisavg = _mm_srli_pi16(thisavg, 2);
+ nextavg = _mm_srli_pi16(nextavg, 2);
+
+ avg = _mm_packs_pu16(thisavg, nextavg);
+
+ _mm_store_si64((__m64 *)&outptr[0], avg);
+ }
+ }
+}
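
The bias constant deserves a word. (1 << 17) + 1 replicated into 32-bit lanes yields the 16-bit lane pattern {1, 2, 1, 2}, so (sum + bias) >> 2 rounds exact halves down in one output column and up in the next; a constant rounding term would bias every column the same way. A scalar sketch of the same h2v2 average:

/* Scalar sketch of the 2x2 average computed above, with the same
 * alternating rounding bias ({1, 2, 1, 2, ...} per output column). */
static void h2v2_downsample_row(const unsigned char *in0,
                                const unsigned char *in1,
                                unsigned char *out, int output_cols)
{
  for (int c = 0; c < output_cols; c++) {
    int bias = 1 + (c & 1);   /* 1, 2, 1, 2, ... */
    int sum = in0[2 * c] + in0[2 * c + 1] + in1[2 * c] + in1[2 * c + 1];
    out[c] = (unsigned char)((sum + bias) >> 2);
  }
}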
diff --git a/media/libjpeg/simd/mips64/jcsample.h b/media/libjpeg/simd/mips64/jcsample.h
new file mode 100644
index 0000000000..bd07fcc4ed
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jcsample.h
@@ -0,0 +1,28 @@
+/*
+ * jcsample.h
+ *
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1991-1996, Thomas G. Lane.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ */
+
+LOCAL(void)
+expand_right_edge(JSAMPARRAY image_data, int num_rows, JDIMENSION input_cols,
+ JDIMENSION output_cols)
+{
+ register JSAMPROW ptr;
+ register JSAMPLE pixval;
+ register int count;
+ int row;
+ int numcols = (int)(output_cols - input_cols);
+
+ if (numcols > 0) {
+ for (row = 0; row < num_rows; row++) {
+ ptr = image_data[row] + input_cols;
+ pixval = ptr[-1];
+ for (count = numcols; count > 0; count--)
+ *ptr++ = pixval;
+ }
+ }
+}
diff --git a/media/libjpeg/simd/mips64/jdcolext-mmi.c b/media/libjpeg/simd/mips64/jdcolext-mmi.c
new file mode 100644
index 0000000000..3b5b2f2030
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jdcolext-mmi.c
@@ -0,0 +1,415 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+ * Copyright (C) 2015, 2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* This file is included by jdcolor-mmi.c */
+
+
+#if RGB_RED == 0
+#define mmA re
+#define mmB ro
+#elif RGB_GREEN == 0
+#define mmA ge
+#define mmB go
+#elif RGB_BLUE == 0
+#define mmA be
+#define mmB bo
+#else
+#define mmA xe
+#define mmB xo
+#endif
+
+#if RGB_RED == 1
+#define mmC re
+#define mmD ro
+#elif RGB_GREEN == 1
+#define mmC ge
+#define mmD go
+#elif RGB_BLUE == 1
+#define mmC be
+#define mmD bo
+#else
+#define mmC xe
+#define mmD xo
+#endif
+
+#if RGB_RED == 2
+#define mmE re
+#define mmF ro
+#elif RGB_GREEN == 2
+#define mmE ge
+#define mmF go
+#elif RGB_BLUE == 2
+#define mmE be
+#define mmF bo
+#else
+#define mmE xe
+#define mmF xo
+#endif
+
+#if RGB_RED == 3
+#define mmG re
+#define mmH ro
+#elif RGB_GREEN == 3
+#define mmG ge
+#define mmH go
+#elif RGB_BLUE == 3
+#define mmG be
+#define mmH bo
+#else
+#define mmG xe
+#define mmH xo
+#endif
+
+
+void jsimd_ycc_rgb_convert_mmi(JDIMENSION out_width, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows)
+{
+ JSAMPROW outptr, inptr0, inptr1, inptr2;
+ int num_cols, col;
+ __m64 ye, yo, y, cbe, cbe2, cbo, cbo2, cb, cre, cre2, cro, cro2, cr;
+ __m64 re, ro, gle, ghe, ge, glo, gho, go, be, bo, xe = 0.0, xo = 0.0;
+ __m64 decenter, mask;
+
+ while (--num_rows >= 0) {
+ inptr0 = input_buf[0][input_row];
+ inptr1 = input_buf[1][input_row];
+ inptr2 = input_buf[2][input_row];
+ input_row++;
+ outptr = *output_buf++;
+
+ for (num_cols = out_width; num_cols > 0; num_cols -= 8,
+ inptr0 += 8, inptr1 += 8, inptr2 += 8) {
+
+ cb = _mm_load_si64((__m64 *)inptr1);
+ cr = _mm_load_si64((__m64 *)inptr2);
+ y = _mm_load_si64((__m64 *)inptr0);
+
+ mask = decenter = 0.0;
+ mask = _mm_cmpeq_pi16(mask, mask);
+ decenter = _mm_cmpeq_pi16(decenter, decenter);
+ mask = _mm_srli_pi16(mask, BYTE_BIT); /* {0xFF 0x00 0xFF 0x00 ..} */
+ decenter = _mm_slli_pi16(decenter, 7); /* {0xFF80 0xFF80 0xFF80 0xFF80} */
+
+ cbe = _mm_and_si64(mask, cb); /* Cb(0246) */
+ cbo = _mm_srli_pi16(cb, BYTE_BIT); /* Cb(1357) */
+ cre = _mm_and_si64(mask, cr); /* Cr(0246) */
+ cro = _mm_srli_pi16(cr, BYTE_BIT); /* Cr(1357) */
+ cbe = _mm_add_pi16(cbe, decenter);
+ cbo = _mm_add_pi16(cbo, decenter);
+ cre = _mm_add_pi16(cre, decenter);
+ cro = _mm_add_pi16(cro, decenter);
+
+ /* (Original)
+ * R = Y + 1.40200 * Cr
+ * G = Y - 0.34414 * Cb - 0.71414 * Cr
+ * B = Y + 1.77200 * Cb
+ *
+ * (This implementation)
+ * R = Y + 0.40200 * Cr + Cr
+ * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
+ * B = Y - 0.22800 * Cb + Cb + Cb
+ */
+
+ cbe2 = _mm_add_pi16(cbe, cbe); /* 2*CbE */
+ cbo2 = _mm_add_pi16(cbo, cbo); /* 2*CbO */
+ cre2 = _mm_add_pi16(cre, cre); /* 2*CrE */
+ cro2 = _mm_add_pi16(cro, cro); /* 2*CrO */
+
+      be = _mm_mulhi_pi16(cbe2, PW_MF0228); /* (2*CbE * -FIX(0.22800)) */
+      bo = _mm_mulhi_pi16(cbo2, PW_MF0228); /* (2*CbO * -FIX(0.22800)) */
+ re = _mm_mulhi_pi16(cre2, PW_F0402); /* (2*CrE * FIX(0.40200)) */
+ ro = _mm_mulhi_pi16(cro2, PW_F0402); /* (2*CrO * FIX(0.40200)) */
+
+ be = _mm_add_pi16(be, PW_ONE);
+ bo = _mm_add_pi16(bo, PW_ONE);
+ be = _mm_srai_pi16(be, 1); /* (CbE * -FIX(0.22800)) */
+ bo = _mm_srai_pi16(bo, 1); /* (CbO * -FIX(0.22800)) */
+ re = _mm_add_pi16(re, PW_ONE);
+ ro = _mm_add_pi16(ro, PW_ONE);
+ re = _mm_srai_pi16(re, 1); /* (CrE * FIX(0.40200)) */
+ ro = _mm_srai_pi16(ro, 1); /* (CrO * FIX(0.40200)) */
+
+ be = _mm_add_pi16(be, cbe);
+ bo = _mm_add_pi16(bo, cbo);
+ be = _mm_add_pi16(be, cbe); /* (CbE * FIX(1.77200))=(B-Y)E */
+ bo = _mm_add_pi16(bo, cbo); /* (CbO * FIX(1.77200))=(B-Y)O */
+ re = _mm_add_pi16(re, cre); /* (CrE * FIX(1.40200))=(R-Y)E */
+ ro = _mm_add_pi16(ro, cro); /* (CrO * FIX(1.40200))=(R-Y)O */
+
+ gle = _mm_unpacklo_pi16(cbe, cre);
+ ghe = _mm_unpackhi_pi16(cbe, cre);
+ gle = _mm_madd_pi16(gle, PW_MF0344_F0285);
+ ghe = _mm_madd_pi16(ghe, PW_MF0344_F0285);
+ glo = _mm_unpacklo_pi16(cbo, cro);
+ gho = _mm_unpackhi_pi16(cbo, cro);
+ glo = _mm_madd_pi16(glo, PW_MF0344_F0285);
+ gho = _mm_madd_pi16(gho, PW_MF0344_F0285);
+
+ gle = _mm_add_pi32(gle, PD_ONEHALF);
+ ghe = _mm_add_pi32(ghe, PD_ONEHALF);
+ gle = _mm_srai_pi32(gle, SCALEBITS);
+ ghe = _mm_srai_pi32(ghe, SCALEBITS);
+ glo = _mm_add_pi32(glo, PD_ONEHALF);
+ gho = _mm_add_pi32(gho, PD_ONEHALF);
+ glo = _mm_srai_pi32(glo, SCALEBITS);
+ gho = _mm_srai_pi32(gho, SCALEBITS);
+
+ ge = _mm_packs_pi32(gle, ghe); /* CbE*-FIX(0.344)+CrE*FIX(0.285) */
+ go = _mm_packs_pi32(glo, gho); /* CbO*-FIX(0.344)+CrO*FIX(0.285) */
+ ge = _mm_sub_pi16(ge, cre); /* CbE*-FIX(0.344)+CrE*-FIX(0.714)=(G-Y)E */
+ go = _mm_sub_pi16(go, cro); /* CbO*-FIX(0.344)+CrO*-FIX(0.714)=(G-Y)O */
+
+ ye = _mm_and_si64(mask, y); /* Y(0246) */
+ yo = _mm_srli_pi16(y, BYTE_BIT); /* Y(1357) */
+
+ re = _mm_add_pi16(re, ye); /* ((R-Y)E+YE)=(R0 R2 R4 R6) */
+ ro = _mm_add_pi16(ro, yo); /* ((R-Y)O+YO)=(R1 R3 R5 R7) */
+ re = _mm_packs_pu16(re, re); /* (R0 R2 R4 R6 ** ** ** **) */
+ ro = _mm_packs_pu16(ro, ro); /* (R1 R3 R5 R7 ** ** ** **) */
+
+ ge = _mm_add_pi16(ge, ye); /* ((G-Y)E+YE)=(G0 G2 G4 G6) */
+ go = _mm_add_pi16(go, yo); /* ((G-Y)O+YO)=(G1 G3 G5 G7) */
+ ge = _mm_packs_pu16(ge, ge); /* (G0 G2 G4 G6 ** ** ** **) */
+ go = _mm_packs_pu16(go, go); /* (G1 G3 G5 G7 ** ** ** **) */
+
+ be = _mm_add_pi16(be, ye); /* (YE+(B-Y)E)=(B0 B2 B4 B6) */
+ bo = _mm_add_pi16(bo, yo); /* (YO+(B-Y)O)=(B1 B3 B5 B7) */
+ be = _mm_packs_pu16(be, be); /* (B0 B2 B4 B6 ** ** ** **) */
+ bo = _mm_packs_pu16(bo, bo); /* (B1 B3 B5 B7 ** ** ** **) */
+
+#if RGB_PIXELSIZE == 3
+
+ /* mmA=(00 02 04 06 ** ** ** **), mmB=(01 03 05 07 ** ** ** **) */
+ /* mmC=(10 12 14 16 ** ** ** **), mmD=(11 13 15 17 ** ** ** **) */
+ mmA = _mm_unpacklo_pi8(mmA, mmC); /* (00 10 02 12 04 14 06 16) */
+ mmE = _mm_unpacklo_pi8(mmE, mmB); /* (20 01 22 03 24 05 26 07) */
+ mmD = _mm_unpacklo_pi8(mmD, mmF); /* (11 21 13 23 15 25 17 27) */
+
+ mmH = _mm_srli_si64(mmA, 2 * BYTE_BIT);
+
+ mmG = _mm_unpackhi_pi16(mmA, mmE); /* (04 14 24 05 06 16 26 07) */
+ mmA = _mm_unpacklo_pi16(mmA, mmE); /* (00 10 20 01 02 12 22 03) */
+
+ mmE = _mm_srli_si64(mmE, 2 * BYTE_BIT);
+ mmB = _mm_srli_si64(mmD, 2 * BYTE_BIT); /* (13 23 15 25 17 27 -- --) */
+
+ mmC = _mm_unpackhi_pi16(mmD, mmH); /* (15 25 06 16 17 27 -- --) */
+ mmD = _mm_unpacklo_pi16(mmD, mmH); /* (11 21 02 12 13 23 04 14) */
+
+ mmF = _mm_unpackhi_pi16(mmE, mmB); /* (26 07 17 27 -- -- -- --) */
+ mmE = _mm_unpacklo_pi16(mmE, mmB); /* (22 03 13 23 24 05 15 25) */
+
+ mmA = _mm_unpacklo_pi32(mmA, mmD); /* (00 10 20 01 11 21 02 12) */
+ mmE = _mm_unpacklo_pi32(mmE, mmG); /* (22 03 13 23 04 14 24 05) */
+ mmC = _mm_unpacklo_pi32(mmC, mmF); /* (15 25 06 16 26 07 17 27) */
+
+ if (num_cols >= 8) {
+ if (!(((long)outptr) & 7)) {
+ _mm_store_si64((__m64 *)outptr, mmA);
+ _mm_store_si64((__m64 *)(outptr + 8), mmE);
+ _mm_store_si64((__m64 *)(outptr + 16), mmC);
+ } else {
+ _mm_storeu_si64((__m64 *)outptr, mmA);
+ _mm_storeu_si64((__m64 *)(outptr + 8), mmE);
+ _mm_storeu_si64((__m64 *)(outptr + 16), mmC);
+ }
+ outptr += RGB_PIXELSIZE * 8;
+ } else {
+ col = num_cols * 3;
+ asm(".set noreorder\r\n"
+
+ "li $8, 16\r\n"
+ "move $9, %4\r\n"
+ "mov.s $f4, %1\r\n"
+ "mov.s $f6, %3\r\n"
+ "move $10, %5\r\n"
+ "bltu $9, $8, 1f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "gssdlc1 $f6, 7+8($10)\r\n"
+ "gssdrc1 $f6, 8($10)\r\n"
+ "mov.s $f4, %2\r\n"
+ "subu $9, $9, 16\r\n"
+ PTR_ADDU "$10, $10, 16\r\n"
+ "b 2f\r\n"
+ "nop \r\n"
+
+ "1: \r\n"
+ "li $8, 8\r\n" /* st8 */
+ "bltu $9, $8, 2f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "mov.s $f4, %3\r\n"
+ "subu $9, $9, 8\r\n"
+ PTR_ADDU "$10, $10, 8\r\n"
+
+ "2: \r\n"
+ "li $8, 4\r\n" /* st4 */
+ "mfc1 $11, $f4\r\n"
+ "bltu $9, $8, 3f\r\n"
+ "nop \r\n"
+ "swl $11, 3($10)\r\n"
+ "swr $11, 0($10)\r\n"
+ "li $8, 32\r\n"
+ "mtc1 $8, $f6\r\n"
+ "dsrl $f4, $f4, $f6\r\n"
+ "mfc1 $11, $f4\r\n"
+ "subu $9, $9, 4\r\n"
+ PTR_ADDU "$10, $10, 4\r\n"
+
+ "3: \r\n"
+ "li $8, 2\r\n" /* st2 */
+ "bltu $9, $8, 4f\r\n"
+ "nop \r\n"
+ "ush $11, 0($10)\r\n"
+ "srl $11, 16\r\n"
+ "subu $9, $9, 2\r\n"
+ PTR_ADDU "$10, $10, 2\r\n"
+
+ "4: \r\n"
+ "li $8, 1\r\n" /* st1 */
+ "bltu $9, $8, 5f\r\n"
+ "nop \r\n"
+ "sb $11, 0($10)\r\n"
+
+ "5: \r\n"
+ "nop \r\n" /* end */
+ : "=m" (*outptr)
+ : "f" (mmA), "f" (mmC), "f" (mmE), "r" (col), "r" (outptr)
+ : "$f4", "$f6", "$8", "$9", "$10", "$11", "memory"
+ );
+ }
+
+#else /* RGB_PIXELSIZE == 4 */
+
+#ifdef RGBX_FILLER_0XFF
+ xe = _mm_cmpeq_pi8(xe, xe);
+ xo = _mm_cmpeq_pi8(xo, xo);
+#else
+ xe = _mm_xor_si64(xe, xe);
+ xo = _mm_xor_si64(xo, xo);
+#endif
+ /* mmA=(00 02 04 06 ** ** ** **), mmB=(01 03 05 07 ** ** ** **) */
+ /* mmC=(10 12 14 16 ** ** ** **), mmD=(11 13 15 17 ** ** ** **) */
+ /* mmE=(20 22 24 26 ** ** ** **), mmF=(21 23 25 27 ** ** ** **) */
+ /* mmG=(30 32 34 36 ** ** ** **), mmH=(31 33 35 37 ** ** ** **) */
+
+ mmA = _mm_unpacklo_pi8(mmA, mmC); /* (00 10 02 12 04 14 06 16) */
+ mmE = _mm_unpacklo_pi8(mmE, mmG); /* (20 30 22 32 24 34 26 36) */
+ mmB = _mm_unpacklo_pi8(mmB, mmD); /* (01 11 03 13 05 15 07 17) */
+ mmF = _mm_unpacklo_pi8(mmF, mmH); /* (21 31 23 33 25 35 27 37) */
+
+ mmC = _mm_unpackhi_pi16(mmA, mmE); /* (04 14 24 34 06 16 26 36) */
+ mmA = _mm_unpacklo_pi16(mmA, mmE); /* (00 10 20 30 02 12 22 32) */
+ mmG = _mm_unpackhi_pi16(mmB, mmF); /* (05 15 25 35 07 17 27 37) */
+ mmB = _mm_unpacklo_pi16(mmB, mmF); /* (01 11 21 31 03 13 23 33) */
+
+ mmD = _mm_unpackhi_pi32(mmA, mmB); /* (02 12 22 32 03 13 23 33) */
+ mmA = _mm_unpacklo_pi32(mmA, mmB); /* (00 10 20 30 01 11 21 31) */
+ mmH = _mm_unpackhi_pi32(mmC, mmG); /* (06 16 26 36 07 17 27 37) */
+ mmC = _mm_unpacklo_pi32(mmC, mmG); /* (04 14 24 34 05 15 25 35) */
+
+ if (num_cols >= 8) {
+ if (!(((long)outptr) & 7)) {
+ _mm_store_si64((__m64 *)outptr, mmA);
+ _mm_store_si64((__m64 *)(outptr + 8), mmD);
+ _mm_store_si64((__m64 *)(outptr + 16), mmC);
+ _mm_store_si64((__m64 *)(outptr + 24), mmH);
+ } else {
+ _mm_storeu_si64((__m64 *)outptr, mmA);
+ _mm_storeu_si64((__m64 *)(outptr + 8), mmD);
+ _mm_storeu_si64((__m64 *)(outptr + 16), mmC);
+ _mm_storeu_si64((__m64 *)(outptr + 24), mmH);
+ }
+ outptr += RGB_PIXELSIZE * 8;
+ } else {
+ col = num_cols;
+ asm(".set noreorder\r\n" /* st16 */
+
+ "li $8, 4\r\n"
+ "move $9, %6\r\n"
+ "move $10, %7\r\n"
+ "mov.s $f4, %2\r\n"
+ "mov.s $f6, %4\r\n"
+ "bltu $9, $8, 1f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "gssdlc1 $f6, 7+8($10)\r\n"
+ "gssdrc1 $f6, 8($10)\r\n"
+ "mov.s $f4, %3\r\n"
+ "mov.s $f6, %5\r\n"
+ "subu $9, $9, 4\r\n"
+ PTR_ADDU "$10, $10, 16\r\n"
+
+ "1: \r\n"
+ "li $8, 2\r\n" /* st8 */
+ "bltu $9, $8, 2f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "mov.s $f4, $f6\r\n"
+ "subu $9, $9, 2\r\n"
+ PTR_ADDU "$10, $10, 8\r\n"
+
+ "2: \r\n"
+ "li $8, 1\r\n" /* st4 */
+ "bltu $9, $8, 3f\r\n"
+ "nop \r\n"
+ "gsswlc1 $f4, 3($10)\r\n"
+ "gsswrc1 $f4, 0($10)\r\n"
+
+ "3: \r\n"
+ "li %1, 0\r\n" /* end */
+ : "=m" (*outptr), "=r" (col)
+ : "f" (mmA), "f" (mmC), "f" (mmD), "f" (mmH), "r" (col),
+ "r" (outptr)
+ : "$f4", "$f6", "$8", "$9", "$10", "memory"
+ );
+ }
+
+#endif
+
+ }
+ }
+}
+
+#undef mmA
+#undef mmB
+#undef mmC
+#undef mmD
+#undef mmE
+#undef mmF
+#undef mmG
+#undef mmH
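
The doubled-chroma multiplies above are a rounding idiom: _mm_mulhi_pi16 returns only the high 16 bits of a 16 x 16 product, i.e. an implicit >> 16. Multiplying 2*Cr instead of Cr keeps one extra bit of the product, and the subsequent add-PW_ONE / arithmetic-shift-right-1 rounds it to nearest. A scalar sketch for the FIX(0.402) term (the FIX(0.228) term works the same way):

/* Scalar sketch of:  re = mulhi(2*cr, PW_F0402); re = (re + 1) >> 1;
 * which computes cr * 0.402, rounded.  Assumes SCALEBITS == 16. */
static short mul_f0402(short cr)                   /* cr = Cr - 128 */
{
  const int F_0_402 = 26345;                       /* FIX(1.402) - FIX(1) */
  int hi = ((2 * cr) * F_0_402) >> 16;             /* _mm_mulhi_pi16 */
  return (short)((hi + 1) >> 1);                   /* round to nearest */
}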
diff --git a/media/libjpeg/simd/mips64/jdcolor-mmi.c b/media/libjpeg/simd/mips64/jdcolor-mmi.c
new file mode 100644
index 0000000000..2c58263dbd
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jdcolor-mmi.c
@@ -0,0 +1,139 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2011, 2015, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* YCC --> RGB CONVERSION */
+
+#include "jsimd_mmi.h"
+
+
+#define F_0_344 ((short)22554) /* FIX(0.34414) */
+#define F_0_402 ((short)26345) /* FIX(1.40200) - FIX(1) */
+#define F_0_285 ((short)18734) /* FIX(1) - FIX(0.71414) */
+#define F_0_228 ((short)14942) /* FIX(2) - FIX(1.77200) */
+
+enum const_index {
+ index_PW_ONE,
+ index_PW_F0402,
+ index_PW_MF0228,
+ index_PW_MF0344_F0285,
+ index_PD_ONEHALF
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi16(1, 1, 1, 1),
+ _uint64_set_pi16(F_0_402, F_0_402, F_0_402, F_0_402),
+ _uint64_set_pi16(-F_0_228, -F_0_228, -F_0_228, -F_0_228),
+ _uint64_set_pi16(F_0_285, -F_0_344, F_0_285, -F_0_344),
+ _uint64_set_pi32((int)(1 << (SCALEBITS - 1)), (int)(1 << (SCALEBITS - 1)))
+};
+
+#define PW_ONE get_const_value(index_PW_ONE)
+#define PW_F0402 get_const_value(index_PW_F0402)
+#define PW_MF0228 get_const_value(index_PW_MF0228)
+#define PW_MF0344_F0285 get_const_value(index_PW_MF0344_F0285)
+#define PD_ONEHALF get_const_value(index_PD_ONEHALF)
+
+#define RGBX_FILLER_0XFF 1
+
+
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+
+#define RGB_RED EXT_RGB_RED
+#define RGB_GREEN EXT_RGB_GREEN
+#define RGB_BLUE EXT_RGB_BLUE
+#define RGB_PIXELSIZE EXT_RGB_PIXELSIZE
+#define jsimd_ycc_rgb_convert_mmi jsimd_ycc_extrgb_convert_mmi
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_ycc_rgb_convert_mmi
+
+#define RGB_RED EXT_RGBX_RED
+#define RGB_GREEN EXT_RGBX_GREEN
+#define RGB_BLUE EXT_RGBX_BLUE
+#define RGB_PIXELSIZE EXT_RGBX_PIXELSIZE
+#define jsimd_ycc_rgb_convert_mmi jsimd_ycc_extrgbx_convert_mmi
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_ycc_rgb_convert_mmi
+
+#define RGB_RED EXT_BGR_RED
+#define RGB_GREEN EXT_BGR_GREEN
+#define RGB_BLUE EXT_BGR_BLUE
+#define RGB_PIXELSIZE EXT_BGR_PIXELSIZE
+#define jsimd_ycc_rgb_convert_mmi jsimd_ycc_extbgr_convert_mmi
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_ycc_rgb_convert_mmi
+
+#define RGB_RED EXT_BGRX_RED
+#define RGB_GREEN EXT_BGRX_GREEN
+#define RGB_BLUE EXT_BGRX_BLUE
+#define RGB_PIXELSIZE EXT_BGRX_PIXELSIZE
+#define jsimd_ycc_rgb_convert_mmi jsimd_ycc_extbgrx_convert_mmi
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_ycc_rgb_convert_mmi
+
+#define RGB_RED EXT_XBGR_RED
+#define RGB_GREEN EXT_XBGR_GREEN
+#define RGB_BLUE EXT_XBGR_BLUE
+#define RGB_PIXELSIZE EXT_XBGR_PIXELSIZE
+#define jsimd_ycc_rgb_convert_mmi jsimd_ycc_extxbgr_convert_mmi
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_ycc_rgb_convert_mmi
+
+#define RGB_RED EXT_XRGB_RED
+#define RGB_GREEN EXT_XRGB_GREEN
+#define RGB_BLUE EXT_XRGB_BLUE
+#define RGB_PIXELSIZE EXT_XRGB_PIXELSIZE
+#define jsimd_ycc_rgb_convert_mmi jsimd_ycc_extxrgb_convert_mmi
+#include "jdcolext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_ycc_rgb_convert_mmi
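
The coefficient rewrites documented in the F_* comments make each constant fit a signed 16-bit word: FIX(1.402) = 91881 and FIX(1.772) = 116130 are out of range, so the code folds the integer part into separate additions of Cb/Cr (see the "(This implementation)" comment in jdcolext-mmi.c). A short program reproducing the table values, assuming FIX(x) = round(x * 2^16):

/* Reproduces the decode constants above; FIX(x) = round(x * 2^16). */
#include <stdio.h>

#define FIX(x)  ((int)((x) * 65536.0 + 0.5))

int main(void)
{
  printf("F_0_402 = %d\n", FIX(1.40200) - FIX(1.0));   /* 26345 */
  printf("F_0_285 = %d\n", FIX(1.0) - FIX(0.71414));   /* 18734 */
  printf("F_0_228 = %d\n", FIX(2.0) - FIX(1.77200));   /* 14942 */
  printf("F_0_344 = %d\n", FIX(0.34414));              /* 22554 */
  return 0;
}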
diff --git a/media/libjpeg/simd/mips64/jdmerge-mmi.c b/media/libjpeg/simd/mips64/jdmerge-mmi.c
new file mode 100644
index 0000000000..0a39bd5680
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jdmerge-mmi.c
@@ -0,0 +1,149 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2011, 2015, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhangLixia <zhanglixia-hf@loongson.cn>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* YCC --> RGB CONVERSION */
+
+#include "jsimd_mmi.h"
+
+
+#define F_0_344 ((short)22554) /* FIX(0.34414) */
+#define F_0_402 ((short)26345) /* FIX(1.40200) - FIX(1) */
+#define F_0_285 ((short)18734) /* FIX(1) - FIX(0.71414) */
+#define F_0_228 ((short)14942) /* FIX(2) - FIX(1.77200) */
+
+enum const_index {
+ index_PW_ONE,
+ index_PW_F0402,
+ index_PW_MF0228,
+ index_PW_MF0344_F0285,
+ index_PD_ONEHALF
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi16(1, 1, 1, 1),
+ _uint64_set_pi16(F_0_402, F_0_402, F_0_402, F_0_402),
+ _uint64_set_pi16(-F_0_228, -F_0_228, -F_0_228, -F_0_228),
+ _uint64_set_pi16(F_0_285, -F_0_344, F_0_285, -F_0_344),
+ _uint64_set_pi32((int)(1 << (SCALEBITS - 1)), (int)(1 << (SCALEBITS - 1)))
+};
+
+#define PW_ONE get_const_value(index_PW_ONE)
+#define PW_F0402 get_const_value(index_PW_F0402)
+#define PW_MF0228 get_const_value(index_PW_MF0228)
+#define PW_MF0344_F0285 get_const_value(index_PW_MF0344_F0285)
+#define PD_ONEHALF get_const_value(index_PD_ONEHALF)
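+
+/* The F_* fractions above are scaled by 2^16 (SCALEBITS).  They are applied
+ * either with the double-and-_mm_mulhi_pi16 trick (see jdmrgext-mmi.c) or
+ * with _mm_madd_pi16, in which case PD_ONEHALF supplies the 0.5 rounding
+ * term that is added before the arithmetic right shift by SCALEBITS.
+ */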
+
+#define RGBX_FILLER_0XFF 1
+
+
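+/* jdmrgext-mmi.c is compiled once per pixel format.  The first inclusion
+ * below uses the default RGB_* layout; each subsequent block rebinds the
+ * RGB_* macros and the function names for one extended format before
+ * re-including it.
+ */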
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+
+#define RGB_RED EXT_RGB_RED
+#define RGB_GREEN EXT_RGB_GREEN
+#define RGB_BLUE EXT_RGB_BLUE
+#define RGB_PIXELSIZE EXT_RGB_PIXELSIZE
+#define jsimd_h2v1_merged_upsample_mmi jsimd_h2v1_extrgb_merged_upsample_mmi
+#define jsimd_h2v2_merged_upsample_mmi jsimd_h2v2_extrgb_merged_upsample_mmi
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_h2v1_merged_upsample_mmi
+#undef jsimd_h2v2_merged_upsample_mmi
+
+#define RGB_RED EXT_RGBX_RED
+#define RGB_GREEN EXT_RGBX_GREEN
+#define RGB_BLUE EXT_RGBX_BLUE
+#define RGB_PIXELSIZE EXT_RGBX_PIXELSIZE
+#define jsimd_h2v1_merged_upsample_mmi jsimd_h2v1_extrgbx_merged_upsample_mmi
+#define jsimd_h2v2_merged_upsample_mmi jsimd_h2v2_extrgbx_merged_upsample_mmi
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_h2v1_merged_upsample_mmi
+#undef jsimd_h2v2_merged_upsample_mmi
+
+#define RGB_RED EXT_BGR_RED
+#define RGB_GREEN EXT_BGR_GREEN
+#define RGB_BLUE EXT_BGR_BLUE
+#define RGB_PIXELSIZE EXT_BGR_PIXELSIZE
+#define jsimd_h2v1_merged_upsample_mmi jsimd_h2v1_extbgr_merged_upsample_mmi
+#define jsimd_h2v2_merged_upsample_mmi jsimd_h2v2_extbgr_merged_upsample_mmi
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_h2v1_merged_upsample_mmi
+#undef jsimd_h2v2_merged_upsample_mmi
+
+#define RGB_RED EXT_BGRX_RED
+#define RGB_GREEN EXT_BGRX_GREEN
+#define RGB_BLUE EXT_BGRX_BLUE
+#define RGB_PIXELSIZE EXT_BGRX_PIXELSIZE
+#define jsimd_h2v1_merged_upsample_mmi jsimd_h2v1_extbgrx_merged_upsample_mmi
+#define jsimd_h2v2_merged_upsample_mmi jsimd_h2v2_extbgrx_merged_upsample_mmi
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_h2v1_merged_upsample_mmi
+#undef jsimd_h2v2_merged_upsample_mmi
+
+#define RGB_RED EXT_XBGR_RED
+#define RGB_GREEN EXT_XBGR_GREEN
+#define RGB_BLUE EXT_XBGR_BLUE
+#define RGB_PIXELSIZE EXT_XBGR_PIXELSIZE
+#define jsimd_h2v1_merged_upsample_mmi jsimd_h2v1_extxbgr_merged_upsample_mmi
+#define jsimd_h2v2_merged_upsample_mmi jsimd_h2v2_extxbgr_merged_upsample_mmi
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_h2v1_merged_upsample_mmi
+#undef jsimd_h2v2_merged_upsample_mmi
+
+#define RGB_RED EXT_XRGB_RED
+#define RGB_GREEN EXT_XRGB_GREEN
+#define RGB_BLUE EXT_XRGB_BLUE
+#define RGB_PIXELSIZE EXT_XRGB_PIXELSIZE
+#define jsimd_h2v1_merged_upsample_mmi jsimd_h2v1_extxrgb_merged_upsample_mmi
+#define jsimd_h2v2_merged_upsample_mmi jsimd_h2v2_extxrgb_merged_upsample_mmi
+#include "jdmrgext-mmi.c"
+#undef RGB_RED
+#undef RGB_GREEN
+#undef RGB_BLUE
+#undef RGB_PIXELSIZE
+#undef jsimd_h2v1_merged_upsample_mmi
+#undef jsimd_h2v2_merged_upsample_mmi
diff --git a/media/libjpeg/simd/mips64/jdmrgext-mmi.c b/media/libjpeg/simd/mips64/jdmrgext-mmi.c
new file mode 100644
index 0000000000..be09ff2a65
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jdmrgext-mmi.c
@@ -0,0 +1,615 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+ * Copyright (C) 2015, 2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhangLixia <zhanglixia-hf@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* This file is included by jdmerge-mmi.c */
+
+
+#if RGB_RED == 0
+#define mmA re
+#define mmB ro
+#elif RGB_GREEN == 0
+#define mmA ge
+#define mmB go
+#elif RGB_BLUE == 0
+#define mmA be
+#define mmB bo
+#else
+#define mmA xe
+#define mmB xo
+#endif
+
+#if RGB_RED == 1
+#define mmC re
+#define mmD ro
+#elif RGB_GREEN == 1
+#define mmC ge
+#define mmD go
+#elif RGB_BLUE == 1
+#define mmC be
+#define mmD bo
+#else
+#define mmC xe
+#define mmD xo
+#endif
+
+#if RGB_RED == 2
+#define mmE re
+#define mmF ro
+#elif RGB_GREEN == 2
+#define mmE ge
+#define mmF go
+#elif RGB_BLUE == 2
+#define mmE be
+#define mmF bo
+#else
+#define mmE xe
+#define mmF xo
+#endif
+
+#if RGB_RED == 3
+#define mmG re
+#define mmH ro
+#elif RGB_GREEN == 3
+#define mmG ge
+#define mmH go
+#elif RGB_BLUE == 3
+#define mmG be
+#define mmH bo
+#else
+#define mmG xe
+#define mmH xo
+#endif
+
+
+void jsimd_h2v1_merged_upsample_mmi(JDIMENSION output_width,
+ JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+{
+ JSAMPROW outptr, inptr0, inptr1, inptr2;
+ int num_cols, col;
+ __m64 ythise, ythiso, ythis, ynexte, ynexto, ynext, yl, y;
+ __m64 cbl, cbl2, cbh, cbh2, cb, crl, crl2, crh, crh2, cr;
+ __m64 rle, rlo, rl, rhe, rho, rh, re, ro;
+ __m64 ga, gb, gle, glo, gl, gc, gd, ghe, gho, gh, ge, go;
+ __m64 ble, blo, bl, bhe, bho, bh, be, bo, xe = 0.0, xo = 0.0;
+ __m64 decenter, mask, zero = 0.0;
+#if RGB_PIXELSIZE == 4
+ __m64 mm8, mm9;
+#endif
+
+ inptr0 = input_buf[0][in_row_group_ctr];
+ inptr1 = input_buf[1][in_row_group_ctr];
+ inptr2 = input_buf[2][in_row_group_ctr];
+ outptr = output_buf[0];
+
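+  /* Each iteration consumes 8 Cb/Cr samples and 16 Y samples and emits 16
+   * output pixels: with h2v1 sampling, one chroma sample covers two
+   * horizontally adjacent pixels.
+   */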
+ for (num_cols = output_width >> 1; num_cols > 0; num_cols -= 8,
+ inptr0 += 16, inptr1 += 8, inptr2 += 8) {
+
+ cb = _mm_load_si64((__m64 *)inptr1);
+ cr = _mm_load_si64((__m64 *)inptr2);
+ ythis = _mm_load_si64((__m64 *)inptr0);
+ ynext = _mm_load_si64((__m64 *)inptr0 + 1);
+
+ mask = decenter = 0.0;
+ mask = _mm_cmpeq_pi16(mask, mask);
+ decenter = _mm_cmpeq_pi16(decenter, decenter);
+ mask = _mm_srli_pi16(mask, BYTE_BIT); /* {0xFF 0x00 0xFF 0x00 ..} */
+ decenter = _mm_slli_pi16(decenter, 7); /* {0xFF80 0xFF80 0xFF80 0xFF80} */
+
+ cbl = _mm_unpacklo_pi8(cb, zero); /* Cb(0123) */
+ cbh = _mm_unpackhi_pi8(cb, zero); /* Cb(4567) */
+ crl = _mm_unpacklo_pi8(cr, zero); /* Cr(0123) */
+ crh = _mm_unpackhi_pi8(cr, zero); /* Cr(4567) */
+ cbl = _mm_add_pi16(cbl, decenter);
+ cbh = _mm_add_pi16(cbh, decenter);
+ crl = _mm_add_pi16(crl, decenter);
+ crh = _mm_add_pi16(crh, decenter);
+
+ /* (Original)
+ * R = Y + 1.40200 * Cr
+ * G = Y - 0.34414 * Cb - 0.71414 * Cr
+ * B = Y + 1.77200 * Cb
+ *
+ * (This implementation)
+ * R = Y + 0.40200 * Cr + Cr
+ * G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
+ * B = Y - 0.22800 * Cb + Cb + Cb
+ */
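+
+    /* _mm_mulhi_pi16 keeps only the high 16 bits of each 32-bit product,
+     * i.e. (a * b) >> 16.  Multiplying the doubled chroma value by the
+     * 2^16-scaled fraction and then rounding with (x + 1) >> 1 recovers the
+     * precision bit that a plain mulhi would discard.  For example, with
+     * Cr = 100: (200 * 26345) >> 16 = 80, and (80 + 1) >> 1 = 40, which
+     * matches 100 * 0.402 = 40.2.
+     */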
+
+ cbl2 = _mm_add_pi16(cbl, cbl); /* 2*CbL */
+ cbh2 = _mm_add_pi16(cbh, cbh); /* 2*CbH */
+ crl2 = _mm_add_pi16(crl, crl); /* 2*CrL */
+ crh2 = _mm_add_pi16(crh, crh); /* 2*CrH */
+
+    bl = _mm_mulhi_pi16(cbl2, PW_MF0228);  /* (2*CbL * -FIX(0.22800)) */
+    bh = _mm_mulhi_pi16(cbh2, PW_MF0228);  /* (2*CbH * -FIX(0.22800)) */
+ rl = _mm_mulhi_pi16(crl2, PW_F0402); /* (2*CrL * FIX(0.40200)) */
+ rh = _mm_mulhi_pi16(crh2, PW_F0402); /* (2*CrH * FIX(0.40200)) */
+
+ bl = _mm_add_pi16(bl, PW_ONE);
+ bh = _mm_add_pi16(bh, PW_ONE);
+ bl = _mm_srai_pi16(bl, 1); /* (CbL * -FIX(0.22800)) */
+ bh = _mm_srai_pi16(bh, 1); /* (CbH * -FIX(0.22800)) */
+ rl = _mm_add_pi16(rl, PW_ONE);
+ rh = _mm_add_pi16(rh, PW_ONE);
+ rl = _mm_srai_pi16(rl, 1); /* (CrL * FIX(0.40200)) */
+ rh = _mm_srai_pi16(rh, 1); /* (CrH * FIX(0.40200)) */
+
+ bl = _mm_add_pi16(bl, cbl);
+ bh = _mm_add_pi16(bh, cbh);
+ bl = _mm_add_pi16(bl, cbl); /* (CbL * FIX(1.77200))=(B-Y)L */
+ bh = _mm_add_pi16(bh, cbh); /* (CbH * FIX(1.77200))=(B-Y)H */
+ rl = _mm_add_pi16(rl, crl); /* (CrL * FIX(1.40200))=(R-Y)L */
+ rh = _mm_add_pi16(rh, crh); /* (CrH * FIX(1.40200))=(R-Y)H */
+
+ ga = _mm_unpacklo_pi16(cbl, crl);
+ gb = _mm_unpackhi_pi16(cbl, crl);
+ ga = _mm_madd_pi16(ga, PW_MF0344_F0285);
+ gb = _mm_madd_pi16(gb, PW_MF0344_F0285);
+ gc = _mm_unpacklo_pi16(cbh, crh);
+ gd = _mm_unpackhi_pi16(cbh, crh);
+ gc = _mm_madd_pi16(gc, PW_MF0344_F0285);
+ gd = _mm_madd_pi16(gd, PW_MF0344_F0285);
+
+ ga = _mm_add_pi32(ga, PD_ONEHALF);
+ gb = _mm_add_pi32(gb, PD_ONEHALF);
+ ga = _mm_srai_pi32(ga, SCALEBITS);
+ gb = _mm_srai_pi32(gb, SCALEBITS);
+ gc = _mm_add_pi32(gc, PD_ONEHALF);
+ gd = _mm_add_pi32(gd, PD_ONEHALF);
+ gc = _mm_srai_pi32(gc, SCALEBITS);
+ gd = _mm_srai_pi32(gd, SCALEBITS);
+
+ gl = _mm_packs_pi32(ga, gb); /* CbL*-FIX(0.344)+CrL*FIX(0.285) */
+ gh = _mm_packs_pi32(gc, gd); /* CbH*-FIX(0.344)+CrH*FIX(0.285) */
+ gl = _mm_sub_pi16(gl, crl); /* CbL*-FIX(0.344)+CrL*-FIX(0.714)=(G-Y)L */
+ gh = _mm_sub_pi16(gh, crh); /* CbH*-FIX(0.344)+CrH*-FIX(0.714)=(G-Y)H */
+
+ ythise = _mm_and_si64(mask, ythis); /* Y(0246) */
+ ythiso = _mm_srli_pi16(ythis, BYTE_BIT); /* Y(1357) */
+ ynexte = _mm_and_si64(mask, ynext); /* Y(8ACE) */
+ ynexto = _mm_srli_pi16(ynext, BYTE_BIT); /* Y(9BDF) */
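+
+    /* The (R-Y)/(G-Y)/(B-Y) terms are per chroma sample, so each one is
+     * added to both the even and the odd Y lane: the two pixels of a
+     * horizontal pair share the same Cb/Cr contribution.
+     */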
+
+ rle = _mm_add_pi16(rl, ythise); /* (R0 R2 R4 R6) */
+ rlo = _mm_add_pi16(rl, ythiso); /* (R1 R3 R5 R7) */
+ rhe = _mm_add_pi16(rh, ynexte); /* (R8 RA RC RE) */
+ rho = _mm_add_pi16(rh, ynexto); /* (R9 RB RD RF) */
+ re = _mm_packs_pu16(rle, rhe); /* (R0 R2 R4 R6 R8 RA RC RE) */
+ ro = _mm_packs_pu16(rlo, rho); /* (R1 R3 R5 R7 R9 RB RD RF) */
+
+ gle = _mm_add_pi16(gl, ythise); /* (G0 G2 G4 G6) */
+ glo = _mm_add_pi16(gl, ythiso); /* (G1 G3 G5 G7) */
+ ghe = _mm_add_pi16(gh, ynexte); /* (G8 GA GC GE) */
+ gho = _mm_add_pi16(gh, ynexto); /* (G9 GB GD GF) */
+ ge = _mm_packs_pu16(gle, ghe); /* (G0 G2 G4 G6 G8 GA GC GE) */
+ go = _mm_packs_pu16(glo, gho); /* (G1 G3 G5 G7 G9 GB GD GF) */
+
+ ble = _mm_add_pi16(bl, ythise); /* (B0 B2 B4 B6) */
+ blo = _mm_add_pi16(bl, ythiso); /* (B1 B3 B5 B7) */
+ bhe = _mm_add_pi16(bh, ynexte); /* (B8 BA BC BE) */
+ bho = _mm_add_pi16(bh, ynexto); /* (B9 BB BD BF) */
+ be = _mm_packs_pu16(ble, bhe); /* (B0 B2 B4 B6 B8 BA BC BE) */
+ bo = _mm_packs_pu16(blo, bho); /* (B1 B3 B5 B7 B9 BB BD BF) */
+
+#if RGB_PIXELSIZE == 3
+
+ /* mmA=(00 02 04 06 08 0A 0C 0E), mmB=(01 03 05 07 09 0B 0D 0F) */
+ /* mmC=(10 12 14 16 18 1A 1C 1E), mmD=(11 13 15 17 19 1B 1D 1F) */
+ /* mmE=(20 22 24 26 28 2A 2C 2E), mmF=(21 23 25 27 29 2B 2D 2F) */
+ mmG = _mm_unpacklo_pi8(mmA, mmC); /* (00 10 02 12 04 14 06 16) */
+ mmA = _mm_unpackhi_pi8(mmA, mmC); /* (08 18 0A 1A 0C 1C 0E 1E) */
+ mmH = _mm_unpacklo_pi8(mmE, mmB); /* (20 01 22 03 24 05 26 07) */
+ mmE = _mm_unpackhi_pi8(mmE, mmB); /* (28 09 2A 0B 2C 0D 2E 0F) */
+ mmC = _mm_unpacklo_pi8(mmD, mmF); /* (11 21 13 23 15 25 17 27) */
+ mmD = _mm_unpackhi_pi8(mmD, mmF); /* (19 29 1B 2B 1D 2D 1F 2F) */
+
+ mmB = _mm_unpacklo_pi16(mmG, mmA); /* (00 10 08 18 02 12 0A 1A) */
+ mmA = _mm_unpackhi_pi16(mmG, mmA); /* (04 14 0C 1C 06 16 0E 1E) */
+ mmF = _mm_unpacklo_pi16(mmH, mmE); /* (20 01 28 09 22 03 2A 0B) */
+ mmE = _mm_unpackhi_pi16(mmH, mmE); /* (24 05 2C 0D 26 07 2E 0F) */
+ mmH = _mm_unpacklo_pi16(mmC, mmD); /* (11 21 19 29 13 23 1B 2B) */
+ mmG = _mm_unpackhi_pi16(mmC, mmD); /* (15 25 1D 2D 17 27 1F 2F) */
+
+ mmC = _mm_unpacklo_pi16(mmB, mmF); /* (00 10 20 01 08 18 28 09) */
+ mmB = _mm_srli_si64(mmB, 4 * BYTE_BIT);
+ mmB = _mm_unpacklo_pi16(mmH, mmB); /* (11 21 02 12 19 29 0A 1A) */
+ mmD = _mm_unpackhi_pi16(mmF, mmH); /* (22 03 13 23 2A 0B 1B 2B) */
+ mmF = _mm_unpacklo_pi16(mmA, mmE); /* (04 14 24 05 0C 1C 2C 0D) */
+ mmA = _mm_srli_si64(mmA, 4 * BYTE_BIT);
+ mmH = _mm_unpacklo_pi16(mmG, mmA); /* (15 25 06 16 1D 2D 0E 1E) */
+ mmG = _mm_unpackhi_pi16(mmE, mmG); /* (26 07 17 27 2E 0F 1F 2F) */
+
+ mmA = _mm_unpacklo_pi32(mmC, mmB); /* (00 10 20 01 11 21 02 12) */
+ mmE = _mm_unpackhi_pi32(mmC, mmB); /* (08 18 28 09 19 29 0A 1A) */
+ mmB = _mm_unpacklo_pi32(mmD, mmF); /* (22 03 13 23 04 14 24 05) */
+ mmF = _mm_unpackhi_pi32(mmD, mmF); /* (2A 0B 1B 2B 0C 1C 2C 0D) */
+ mmC = _mm_unpacklo_pi32(mmH, mmG); /* (15 25 06 16 26 07 17 27) */
+ mmG = _mm_unpackhi_pi32(mmH, mmG); /* (1D 2D 0E 1E 2E 0F 1F 2F) */
+
+ if (num_cols >= 8) {
+ if (!(((long)outptr) & 7)) {
+ _mm_store_si64((__m64 *)outptr, mmA);
+ _mm_store_si64((__m64 *)(outptr + 8), mmB);
+ _mm_store_si64((__m64 *)(outptr + 16), mmC);
+ _mm_store_si64((__m64 *)(outptr + 24), mmE);
+ _mm_store_si64((__m64 *)(outptr + 32), mmF);
+ _mm_store_si64((__m64 *)(outptr + 40), mmG);
+ } else {
+ _mm_storeu_si64((__m64 *)outptr, mmA);
+ _mm_storeu_si64((__m64 *)(outptr + 8), mmB);
+ _mm_storeu_si64((__m64 *)(outptr + 16), mmC);
+ _mm_storeu_si64((__m64 *)(outptr + 24), mmE);
+ _mm_storeu_si64((__m64 *)(outptr + 32), mmF);
+ _mm_storeu_si64((__m64 *)(outptr + 40), mmG);
+ }
+ outptr += RGB_PIXELSIZE * 16;
+ } else {
+ if (output_width & 1)
+ col = num_cols * 6 + 3;
+ else
+ col = num_cols * 6;
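+
+      /* Partial block: store the remaining col bytes with descending
+       * unaligned stores of 24, 16, 8, 4, 2, and finally 1 byte(s), using
+       * gssdlc1/gssdrc1 pairs for the 8-byte chunks.
+       */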
+
+ asm(".set noreorder\r\n" /* st24 */
+
+ "li $8, 24\r\n"
+ "move $9, %7\r\n"
+ "mov.s $f4, %1\r\n"
+ "mov.s $f6, %2\r\n"
+ "mov.s $f8, %3\r\n"
+ "move $10, %8\r\n"
+ "bltu $9, $8, 1f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "gssdlc1 $f6, 7+8($10)\r\n"
+ "gssdrc1 $f6, 8($10)\r\n"
+ "gssdlc1 $f8, 7+16($10)\r\n"
+ "gssdrc1 $f8, 16($10)\r\n"
+ "mov.s $f4, %4\r\n"
+ "mov.s $f6, %5\r\n"
+ "mov.s $f8, %6\r\n"
+ "subu $9, $9, 24\r\n"
+ PTR_ADDU "$10, $10, 24\r\n"
+
+ "1: \r\n"
+ "li $8, 16\r\n" /* st16 */
+ "bltu $9, $8, 2f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "gssdlc1 $f6, 7+8($10)\r\n"
+ "gssdrc1 $f6, 8($10)\r\n"
+ "mov.s $f4, $f8\r\n"
+ "subu $9, $9, 16\r\n"
+ PTR_ADDU "$10, $10, 16\r\n"
+
+ "2: \r\n"
+ "li $8, 8\r\n" /* st8 */
+ "bltu $9, $8, 3f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "mov.s $f4, $f6\r\n"
+ "subu $9, $9, 8\r\n"
+ PTR_ADDU "$10, $10, 8\r\n"
+
+ "3: \r\n"
+ "li $8, 4\r\n" /* st4 */
+ "mfc1 $11, $f4\r\n"
+ "bltu $9, $8, 4f\r\n"
+ "nop \r\n"
+ "swl $11, 3($10)\r\n"
+ "swr $11, 0($10)\r\n"
+ "li $8, 32\r\n"
+ "mtc1 $8, $f6\r\n"
+ "dsrl $f4, $f4, $f6\r\n"
+ "mfc1 $11, $f4\r\n"
+ "subu $9, $9, 4\r\n"
+ PTR_ADDU "$10, $10, 4\r\n"
+
+ "4: \r\n"
+ "li $8, 2\r\n" /* st2 */
+ "bltu $9, $8, 5f\r\n"
+ "nop \r\n"
+ "ush $11, 0($10)\r\n"
+ "srl $11, 16\r\n"
+ "subu $9, $9, 2\r\n"
+ PTR_ADDU "$10, $10, 2\r\n"
+
+ "5: \r\n"
+ "li $8, 1\r\n" /* st1 */
+ "bltu $9, $8, 6f\r\n"
+ "nop \r\n"
+ "sb $11, 0($10)\r\n"
+
+ "6: \r\n"
+ "nop \r\n" /* end */
+ : "=m" (*outptr)
+ : "f" (mmA), "f" (mmB), "f" (mmC), "f" (mmE), "f" (mmF),
+ "f" (mmG), "r" (col), "r" (outptr)
+ : "$f4", "$f6", "$f8", "$8", "$9", "$10", "$11", "memory"
+ );
+ }
+
+#else /* RGB_PIXELSIZE == 4 */
+
+#ifdef RGBX_FILLER_0XFF
+ xe = _mm_cmpeq_pi8(xe, xe);
+ xo = _mm_cmpeq_pi8(xo, xo);
+#else
+ xe = _mm_xor_si64(xe, xe);
+ xo = _mm_xor_si64(xo, xo);
+#endif
+ /* mmA=(00 02 04 06 08 0A 0C 0E), mmB=(01 03 05 07 09 0B 0D 0F) */
+ /* mmC=(10 12 14 16 18 1A 1C 1E), mmD=(11 13 15 17 19 1B 1D 1F) */
+ /* mmE=(20 22 24 26 28 2A 2C 2E), mmF=(21 23 25 27 29 2B 2D 2F) */
+ /* mmG=(30 32 34 36 38 3A 3C 3E), mmH=(31 33 35 37 39 3B 3D 3F) */
+
+ mm8 = _mm_unpacklo_pi8(mmA, mmC); /* (00 10 02 12 04 14 06 16) */
+ mm9 = _mm_unpackhi_pi8(mmA, mmC); /* (08 18 0A 1A 0C 1C 0E 1E) */
+ mmA = _mm_unpacklo_pi8(mmE, mmG); /* (20 30 22 32 24 34 26 36) */
+ mmE = _mm_unpackhi_pi8(mmE, mmG); /* (28 38 2A 3A 2C 3C 2E 3E) */
+
+ mmG = _mm_unpacklo_pi8(mmB, mmD); /* (01 11 03 13 05 15 07 17) */
+ mmB = _mm_unpackhi_pi8(mmB, mmD); /* (09 19 0B 1B 0D 1D 0F 1F) */
+ mmD = _mm_unpacklo_pi8(mmF, mmH); /* (21 31 23 33 25 35 27 37) */
+ mmF = _mm_unpackhi_pi8(mmF, mmH); /* (29 39 2B 3B 2D 3D 2F 3F) */
+
+ mmH = _mm_unpacklo_pi16(mm8, mmA); /* (00 10 20 30 02 12 22 32) */
+ mm8 = _mm_unpackhi_pi16(mm8, mmA); /* (04 14 24 34 06 16 26 36) */
+ mmA = _mm_unpacklo_pi16(mmG, mmD); /* (01 11 21 31 03 13 23 33) */
+ mmD = _mm_unpackhi_pi16(mmG, mmD); /* (05 15 25 35 07 17 27 37) */
+
+ mmG = _mm_unpackhi_pi16(mm9, mmE); /* (0C 1C 2C 3C 0E 1E 2E 3E) */
+ mm9 = _mm_unpacklo_pi16(mm9, mmE); /* (08 18 28 38 0A 1A 2A 3A) */
+ mmE = _mm_unpacklo_pi16(mmB, mmF); /* (09 19 29 39 0B 1B 2B 3B) */
+ mmF = _mm_unpackhi_pi16(mmB, mmF); /* (0D 1D 2D 3D 0F 1F 2F 3F) */
+
+ mmB = _mm_unpackhi_pi32(mmH, mmA); /* (02 12 22 32 03 13 23 33) */
+ mmA = _mm_unpacklo_pi32(mmH, mmA); /* (00 10 20 30 01 11 21 31) */
+ mmC = _mm_unpacklo_pi32(mm8, mmD); /* (04 14 24 34 05 15 25 35) */
+ mmD = _mm_unpackhi_pi32(mm8, mmD); /* (06 16 26 36 07 17 27 37) */
+
+ mmH = _mm_unpackhi_pi32(mmG, mmF); /* (0E 1E 2E 3E 0F 1F 2F 3F) */
+ mmG = _mm_unpacklo_pi32(mmG, mmF); /* (0C 1C 2C 3C 0D 1D 2D 3D) */
+ mmF = _mm_unpackhi_pi32(mm9, mmE); /* (0A 1A 2A 3A 0B 1B 2B 3B) */
+ mmE = _mm_unpacklo_pi32(mm9, mmE); /* (08 18 28 38 09 19 29 39) */
+
+ if (num_cols >= 8) {
+ if (!(((long)outptr) & 7)) {
+ _mm_store_si64((__m64 *)outptr, mmA);
+ _mm_store_si64((__m64 *)(outptr + 8), mmB);
+ _mm_store_si64((__m64 *)(outptr + 16), mmC);
+ _mm_store_si64((__m64 *)(outptr + 24), mmD);
+ _mm_store_si64((__m64 *)(outptr + 32), mmE);
+ _mm_store_si64((__m64 *)(outptr + 40), mmF);
+ _mm_store_si64((__m64 *)(outptr + 48), mmG);
+ _mm_store_si64((__m64 *)(outptr + 56), mmH);
+ } else {
+ _mm_storeu_si64((__m64 *)outptr, mmA);
+ _mm_storeu_si64((__m64 *)(outptr + 8), mmB);
+ _mm_storeu_si64((__m64 *)(outptr + 16), mmC);
+ _mm_storeu_si64((__m64 *)(outptr + 24), mmD);
+ _mm_storeu_si64((__m64 *)(outptr + 32), mmE);
+ _mm_storeu_si64((__m64 *)(outptr + 40), mmF);
+ _mm_storeu_si64((__m64 *)(outptr + 48), mmG);
+ _mm_storeu_si64((__m64 *)(outptr + 56), mmH);
+ }
+ outptr += RGB_PIXELSIZE * 16;
+ } else {
+ if (output_width & 1)
+ col = num_cols * 2 + 1;
+ else
+ col = num_cols * 2;
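+      /* Partial block: col counts pixels here (4 bytes each); store them
+       * with descending unaligned stores of 32, 16, 8, and finally 4 bytes.
+       */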
+ asm(".set noreorder\r\n" /* st32 */
+
+ "li $8, 8\r\n"
+ "move $9, %10\r\n"
+ "move $10, %11\r\n"
+ "mov.s $f4, %2\r\n"
+ "mov.s $f6, %3\r\n"
+ "mov.s $f8, %4\r\n"
+ "mov.s $f10, %5\r\n"
+ "bltu $9, $8, 1f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "gssdlc1 $f6, 7+8($10)\r\n"
+ "gssdrc1 $f6, 8($10)\r\n"
+ "gssdlc1 $f8, 7+16($10)\r\n"
+ "gssdrc1 $f8, 16($10)\r\n"
+ "gssdlc1 $f10, 7+24($10)\r\n"
+ "gssdrc1 $f10, 24($10)\r\n"
+ "mov.s $f4, %6\r\n"
+ "mov.s $f6, %7\r\n"
+ "mov.s $f8, %8\r\n"
+ "mov.s $f10, %9\r\n"
+ "subu $9, $9, 8\r\n"
+ PTR_ADDU "$10, $10, 32\r\n"
+
+ "1: \r\n"
+ "li $8, 4\r\n" /* st16 */
+ "bltu $9, $8, 2f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "gssdlc1 $f6, 7+8($10)\r\n"
+ "gssdrc1 $f6, 8($10)\r\n"
+ "mov.s $f4, $f8\r\n"
+ "mov.s $f6, $f10\r\n"
+ "subu $9, $9, 4\r\n"
+ PTR_ADDU "$10, $10, 16\r\n"
+
+ "2: \r\n"
+ "li $8, 2\r\n" /* st8 */
+ "bltu $9, $8, 3f\r\n"
+ "nop \r\n"
+ "gssdlc1 $f4, 7($10)\r\n"
+ "gssdrc1 $f4, 0($10)\r\n"
+ "mov.s $f4, $f6\r\n"
+ "subu $9, $9, 2\r\n"
+ PTR_ADDU "$10, $10, 8\r\n"
+
+ "3: \r\n"
+ "li $8, 1\r\n" /* st4 */
+ "bltu $9, $8, 4f\r\n"
+ "nop \r\n"
+ "gsswlc1 $f4, 3($10)\r\n"
+ "gsswrc1 $f4, 0($10)\r\n"
+
+ "4: \r\n"
+ "li %1, 0\r\n" /* end */
+ : "=m" (*outptr), "=r" (col)
+ : "f" (mmA), "f" (mmB), "f" (mmC), "f" (mmD), "f" (mmE), "f" (mmF),
+ "f" (mmG), "f" (mmH), "r" (col), "r" (outptr)
+ : "$f4", "$f6", "$f8", "$f10", "$8", "$9", "$10", "memory"
+ );
+ }
+
+#endif
+
+ }
+
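+  /* If the column loop ended on a full 8-pair block, an odd output_width
+   * still leaves one last pixel unconverted; handle it here.  (A partial
+   * block already folded the odd pixel into its col count above.)
+   */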
+ if (!((output_width >> 1) & 7)) {
+ if (output_width & 1) {
+ cb = _mm_load_si64((__m64 *)inptr1);
+ cr = _mm_load_si64((__m64 *)inptr2);
+ y = _mm_load_si64((__m64 *)inptr0);
+
+ decenter = 0.0;
+ decenter = _mm_cmpeq_pi16(decenter, decenter);
+ decenter = _mm_slli_pi16(decenter, 7); /* {0xFF80 0xFF80 0xFF80 0xFF80} */
+
+ cbl = _mm_unpacklo_pi8(cb, zero); /* Cb(0123) */
+ crl = _mm_unpacklo_pi8(cr, zero); /* Cr(0123) */
+ cbl = _mm_add_pi16(cbl, decenter);
+ crl = _mm_add_pi16(crl, decenter);
+
+ cbl2 = _mm_add_pi16(cbl, cbl); /* 2*CbL */
+ crl2 = _mm_add_pi16(crl, crl); /* 2*CrL */
+      bl = _mm_mulhi_pi16(cbl2, PW_MF0228);  /* (2*CbL * -FIX(0.22800)) */
+ rl = _mm_mulhi_pi16(crl2, PW_F0402); /* (2*CrL * FIX(0.40200)) */
+
+ bl = _mm_add_pi16(bl, PW_ONE);
+ bl = _mm_srai_pi16(bl, 1); /* (CbL * -FIX(0.22800)) */
+ rl = _mm_add_pi16(rl, PW_ONE);
+ rl = _mm_srai_pi16(rl, 1); /* (CrL * FIX(0.40200)) */
+
+ bl = _mm_add_pi16(bl, cbl);
+ bl = _mm_add_pi16(bl, cbl); /* (CbL * FIX(1.77200))=(B-Y)L */
+ rl = _mm_add_pi16(rl, crl); /* (CrL * FIX(1.40200))=(R-Y)L */
+
+ gl = _mm_unpacklo_pi16(cbl, crl);
+ gl = _mm_madd_pi16(gl, PW_MF0344_F0285);
+ gl = _mm_add_pi32(gl, PD_ONEHALF);
+ gl = _mm_srai_pi32(gl, SCALEBITS);
+ gl = _mm_packs_pi32(gl, zero); /* CbL*-FIX(0.344)+CrL*FIX(0.285) */
+ gl = _mm_sub_pi16(gl, crl); /* CbL*-FIX(0.344)+CrL*-FIX(0.714)=(G-Y)L */
+
+ yl = _mm_unpacklo_pi8(y, zero); /* Y(0123) */
+ rl = _mm_add_pi16(rl, yl); /* (R0 R1 R2 R3) */
+ gl = _mm_add_pi16(gl, yl); /* (G0 G1 G2 G3) */
+ bl = _mm_add_pi16(bl, yl); /* (B0 B1 B2 B3) */
+ re = _mm_packs_pu16(rl, rl);
+ ge = _mm_packs_pu16(gl, gl);
+ be = _mm_packs_pu16(bl, bl);
+#if RGB_PIXELSIZE == 3
+ mmA = _mm_unpacklo_pi8(mmA, mmC);
+ mmA = _mm_unpacklo_pi16(mmA, mmE);
+ asm(".set noreorder\r\n"
+
+ "move $8, %2\r\n"
+ "mov.s $f4, %1\r\n"
+ "mfc1 $9, $f4\r\n"
+ "ush $9, 0($8)\r\n"
+ "srl $9, 16\r\n"
+ "sb $9, 2($8)\r\n"
+ : "=m" (*outptr)
+ : "f" (mmA), "r" (outptr)
+ : "$f4", "$8", "$9", "memory"
+ );
+#else /* RGB_PIXELSIZE == 4 */
+
+#ifdef RGBX_FILLER_0XFF
+ xe = _mm_cmpeq_pi8(xe, xe);
+#else
+ xe = _mm_xor_si64(xe, xe);
+#endif
+ mmA = _mm_unpacklo_pi8(mmA, mmC);
+ mmE = _mm_unpacklo_pi8(mmE, mmG);
+ mmA = _mm_unpacklo_pi16(mmA, mmE);
+ asm(".set noreorder\r\n"
+
+ "move $8, %2\r\n"
+ "mov.s $f4, %1\r\n"
+ "gsswlc1 $f4, 3($8)\r\n"
+ "gsswrc1 $f4, 0($8)\r\n"
+ : "=m" (*outptr)
+ : "f" (mmA), "r" (outptr)
+ : "$f4", "$8", "memory"
+ );
+#endif
+ }
+ }
+}
+
+
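+/* h2v2 merged upsampling: each Cb/Cr row pairs with two Y rows, so run the
+ * h2v1 kernel once per output row, temporarily substituting the matching Y
+ * row (and the second output row) before each call.
+ */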
+void jsimd_h2v2_merged_upsample_mmi(JDIMENSION output_width,
+ JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+{
+ JSAMPROW inptr, outptr;
+
+ inptr = input_buf[0][in_row_group_ctr];
+ outptr = output_buf[0];
+
+ input_buf[0][in_row_group_ctr] = input_buf[0][in_row_group_ctr * 2];
+ jsimd_h2v1_merged_upsample_mmi(output_width, input_buf, in_row_group_ctr,
+ output_buf);
+
+ input_buf[0][in_row_group_ctr] = input_buf[0][in_row_group_ctr * 2 + 1];
+ output_buf[0] = output_buf[1];
+ jsimd_h2v1_merged_upsample_mmi(output_width, input_buf, in_row_group_ctr,
+ output_buf);
+
+ input_buf[0][in_row_group_ctr] = inptr;
+ output_buf[0] = outptr;
+}
+
+
+#undef mmA
+#undef mmB
+#undef mmC
+#undef mmD
+#undef mmE
+#undef mmF
+#undef mmG
+#undef mmH
diff --git a/media/libjpeg/simd/mips64/jdsample-mmi.c b/media/libjpeg/simd/mips64/jdsample-mmi.c
new file mode 100644
index 0000000000..8ae94e7dcf
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jdsample-mmi.c
@@ -0,0 +1,304 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2015, 2018-2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ * ZhangLixia <zhanglixia-hf@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* CHROMA UPSAMPLING */
+
+#include "jsimd_mmi.h"
+
+
+enum const_index {
+ index_PW_ONE,
+ index_PW_TWO,
+ index_PW_THREE,
+ index_PW_SEVEN,
+ index_PW_EIGHT,
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi16(1, 1, 1, 1),
+ _uint64_set_pi16(2, 2, 2, 2),
+ _uint64_set_pi16(3, 3, 3, 3),
+ _uint64_set_pi16(7, 7, 7, 7),
+ _uint64_set_pi16(8, 8, 8, 8),
+};
+
+#define PW_ONE get_const_value(index_PW_ONE)
+#define PW_TWO get_const_value(index_PW_TWO)
+#define PW_THREE get_const_value(index_PW_THREE)
+#define PW_SEVEN get_const_value(index_PW_SEVEN)
+#define PW_EIGHT get_const_value(index_PW_EIGHT)
+
+
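+/* Fancy (triangle-filter) upsampling of eight 16-bit samples into sixteen:
+ *
+ *   out[2i]     = (3 * s[i] + s[i-1] + bias1) >> shift
+ *   out[2i + 1] = (3 * s[i] + s[i+1] + bias2) >> shift
+ *
+ * wk[] carries the edge samples across 8-sample blocks.  h2v1 uses biases
+ * 1/2 with shift 2; the second pass of h2v2 uses biases 8/7 with shift 4,
+ * since its inputs are already 3:1 vertical sums.
+ */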
+#define PROCESS_ROW(row, wkoffset, bias1, bias2, shift) { \
+ __m64 samp123X, samp3XXX, samp1234, sampX012, samp_1012; \
+ __m64 sampXXX4, sampX456, samp3456, samp567X, samp7XXX, samp5678; \
+ __m64 outle, outhe, outlo, outho, outl, outh; \
+ \
+ samp123X = _mm_srli_si64(samp0123, 2 * BYTE_BIT); /* ( 1 2 3 -) */ \
+ sampXXX4 = _mm_slli_si64(samp4567, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* ( - - - 4) */ \
+ samp3XXX = _mm_srli_si64(samp0123, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* ( 3 - - -) */ \
+ sampX456 = _mm_slli_si64(samp4567, 2 * BYTE_BIT); /* ( - 4 5 6) */ \
+ \
+ samp1234 = _mm_or_si64(samp123X, sampXXX4); /* ( 1 2 3 4) */ \
+ samp3456 = _mm_or_si64(samp3XXX, sampX456); /* ( 3 4 5 6) */ \
+ \
+ sampX012 = _mm_slli_si64(samp0123, 2 * BYTE_BIT); /* ( - 0 1 2) */ \
+ samp567X = _mm_srli_si64(samp4567, 2 * BYTE_BIT); /* ( 5 6 7 -) */ \
+ samp7XXX = _mm_srli_si64(samp4567, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* ( 7 - - -) */ \
+ \
+ samp_1012 = _mm_or_si64(sampX012, wk[row]); /* (-1 0 1 2) */ \
+ samp5678 = _mm_or_si64(samp567X, wk[row + wkoffset]); /* ( 5 6 7 8) */ \
+ \
+ wk[row] = samp7XXX; \
+ \
+ samp0123 = _mm_mullo_pi16(samp0123, PW_THREE); \
+ samp4567 = _mm_mullo_pi16(samp4567, PW_THREE); \
+ samp_1012 = _mm_add_pi16(samp_1012, bias1); \
+ samp3456 = _mm_add_pi16(samp3456, bias1); \
+ samp1234 = _mm_add_pi16(samp1234, bias2); \
+ samp5678 = _mm_add_pi16(samp5678, bias2); \
+ \
+ outle = _mm_add_pi16(samp_1012, samp0123); \
+ outhe = _mm_add_pi16(samp3456, samp4567); \
+ outle = _mm_srli_pi16(outle, shift); /* ( 0 2 4 6) */ \
+ outhe = _mm_srli_pi16(outhe, shift); /* ( 8 10 12 14) */ \
+ outlo = _mm_add_pi16(samp1234, samp0123); \
+ outho = _mm_add_pi16(samp5678, samp4567); \
+ outlo = _mm_srli_pi16(outlo, shift); /* ( 1 3 5 7) */ \
+ outho = _mm_srli_pi16(outho, shift); /* ( 9 11 13 15) */ \
+ \
+ outlo = _mm_slli_pi16(outlo, BYTE_BIT); \
+ outho = _mm_slli_pi16(outho, BYTE_BIT); \
+ outl = _mm_or_si64(outle, outlo); /* ( 0 1 2 3 4 5 6 7) */ \
+ outh = _mm_or_si64(outhe, outho); /* ( 8 9 10 11 12 13 14 15) */ \
+ \
+ _mm_store_si64((__m64 *)outptr##row, outl); \
+ _mm_store_si64((__m64 *)outptr##row + 1, outh); \
+}
+
+void jsimd_h2v2_fancy_upsample_mmi(int max_v_samp_factor,
+ JDIMENSION downsampled_width,
+ JSAMPARRAY input_data,
+ JSAMPARRAY *output_data_ptr)
+{
+ JSAMPARRAY output_data = *output_data_ptr;
+ JSAMPROW inptr_1, inptr0, inptr1, outptr0, outptr1;
+ int inrow, outrow, incol, tmp, tmp1;
+ __m64 this_1l, this_1h, this_1, thiscolsum_1l, thiscolsum_1h;
+ __m64 this0l, this0h, this0;
+ __m64 this1l, this1h, this1, thiscolsum1l, thiscolsum1h;
+ __m64 next_1l, next_1h, next_1, nextcolsum_1l, nextcolsum_1h;
+ __m64 next0l, next0h, next0;
+ __m64 next1l, next1h, next1, nextcolsum1l, nextcolsum1h;
+ __m64 mask0 = 0.0, masklast, samp0123, samp4567, wk[4], zero = 0.0;
+
+ mask0 = _mm_cmpeq_pi8(mask0, mask0);
+ masklast = _mm_slli_si64(mask0, (SIZEOF_MMWORD - 2) * BYTE_BIT);
+ mask0 = _mm_srli_si64(mask0, (SIZEOF_MMWORD - 2) * BYTE_BIT);
+
+ for (inrow = 0, outrow = 0; outrow < max_v_samp_factor; inrow++) {
+
+ inptr_1 = input_data[inrow - 1];
+ inptr0 = input_data[inrow];
+ inptr1 = input_data[inrow + 1];
+ outptr0 = output_data[outrow++];
+ outptr1 = output_data[outrow++];
+
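+    /* If the width is not a multiple of 8, duplicate the rightmost sample
+     * into the position just past the end of each input row: the filter
+     * references s[i+1] for the last column, and the rows are read with
+     * full 8-byte vector loads.
+     */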
+ if (downsampled_width & 7) {
+ tmp = (downsampled_width - 1) * sizeof(JSAMPLE);
+ tmp1 = downsampled_width * sizeof(JSAMPLE);
+ asm(PTR_ADDU "$8, %3, %6\r\n"
+ "lb $9, ($8)\r\n"
+ PTR_ADDU "$8, %3, %7\r\n"
+ "sb $9, ($8)\r\n"
+ PTR_ADDU "$8, %4, %6\r\n"
+ "lb $9, ($8)\r\n"
+ PTR_ADDU "$8, %4, %7\r\n"
+ "sb $9, ($8)\r\n"
+ PTR_ADDU "$8, %5, %6\r\n"
+ "lb $9, ($8)\r\n"
+ PTR_ADDU "$8, %5, %7\r\n"
+ "sb $9, ($8)\r\n"
+ : "=m" (*inptr_1), "=m" (*inptr0), "=m" (*inptr1)
+ : "r" (inptr_1), "r" (inptr0), "r" (inptr1), "r" (tmp), "r" (tmp1)
+ : "$8", "$9"
+ );
+ }
+
+ /* process the first column block */
+ this0 = _mm_load_si64((__m64 *)inptr0); /* row[ 0][0] */
+ this_1 = _mm_load_si64((__m64 *)inptr_1); /* row[-1][0] */
+ this1 = _mm_load_si64((__m64 *)inptr1); /* row[ 1][0] */
+
+ this0l = _mm_unpacklo_pi8(this0, zero); /* row[ 0][0]( 0 1 2 3) */
+ this0h = _mm_unpackhi_pi8(this0, zero); /* row[ 0][0]( 4 5 6 7) */
+ this_1l = _mm_unpacklo_pi8(this_1, zero); /* row[-1][0]( 0 1 2 3) */
+ this_1h = _mm_unpackhi_pi8(this_1, zero); /* row[-1][0]( 4 5 6 7) */
+ this1l = _mm_unpacklo_pi8(this1, zero); /* row[+1][0]( 0 1 2 3) */
+ this1h = _mm_unpackhi_pi8(this1, zero); /* row[+1][0]( 4 5 6 7) */
+
+ this0l = _mm_mullo_pi16(this0l, PW_THREE);
+ this0h = _mm_mullo_pi16(this0h, PW_THREE);
+
+ thiscolsum_1l = _mm_add_pi16(this_1l, this0l); /* ( 0 1 2 3) */
+ thiscolsum_1h = _mm_add_pi16(this_1h, this0h); /* ( 4 5 6 7) */
+ thiscolsum1l = _mm_add_pi16(this0l, this1l); /* ( 0 1 2 3) */
+ thiscolsum1h = _mm_add_pi16(this0h, this1h); /* ( 4 5 6 7) */
+
+ /* temporarily save the intermediate data */
+ _mm_store_si64((__m64 *)outptr0, thiscolsum_1l);
+ _mm_store_si64((__m64 *)outptr0 + 1, thiscolsum_1h);
+ _mm_store_si64((__m64 *)outptr1, thiscolsum1l);
+ _mm_store_si64((__m64 *)outptr1 + 1, thiscolsum1h);
+
+ wk[0] = _mm_and_si64(thiscolsum_1l, mask0); /* ( 0 - - -) */
+ wk[1] = _mm_and_si64(thiscolsum1l, mask0); /* ( 0 - - -) */
+
+ for (incol = downsampled_width; incol > 0;
+ incol -= 8, inptr_1 += 8, inptr0 += 8, inptr1 += 8,
+ outptr0 += 16, outptr1 += 16) {
+
+ if (incol > 8) {
+ /* process the next column block */
+ next0 = _mm_load_si64((__m64 *)inptr0 + 1); /* row[ 0][1] */
+ next_1 = _mm_load_si64((__m64 *)inptr_1 + 1); /* row[-1][1] */
+ next1 = _mm_load_si64((__m64 *)inptr1 + 1); /* row[+1][1] */
+
+ next0l = _mm_unpacklo_pi8(next0, zero); /* row[ 0][1]( 0 1 2 3) */
+ next0h = _mm_unpackhi_pi8(next0, zero); /* row[ 0][1]( 4 5 6 7) */
+ next_1l = _mm_unpacklo_pi8(next_1, zero); /* row[-1][1]( 0 1 2 3) */
+ next_1h = _mm_unpackhi_pi8(next_1, zero); /* row[-1][1]( 4 5 6 7) */
+ next1l = _mm_unpacklo_pi8(next1, zero); /* row[+1][1]( 0 1 2 3) */
+ next1h = _mm_unpackhi_pi8(next1, zero); /* row[+1][1]( 4 5 6 7) */
+
+ next0l = _mm_mullo_pi16(next0l, PW_THREE);
+ next0h = _mm_mullo_pi16(next0h, PW_THREE);
+
+ nextcolsum_1l = _mm_add_pi16(next_1l, next0l); /* ( 0 1 2 3) */
+ nextcolsum_1h = _mm_add_pi16(next_1h, next0h); /* ( 4 5 6 7) */
+ nextcolsum1l = _mm_add_pi16(next0l, next1l); /* ( 0 1 2 3) */
+ nextcolsum1h = _mm_add_pi16(next0h, next1h); /* ( 4 5 6 7) */
+
+ /* temporarily save the intermediate data */
+ _mm_store_si64((__m64 *)outptr0 + 2, nextcolsum_1l);
+ _mm_store_si64((__m64 *)outptr0 + 3, nextcolsum_1h);
+ _mm_store_si64((__m64 *)outptr1 + 2, nextcolsum1l);
+ _mm_store_si64((__m64 *)outptr1 + 3, nextcolsum1h);
+
+ wk[2] = _mm_slli_si64(nextcolsum_1l, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* ( - - - 0) */
+ wk[3] = _mm_slli_si64(nextcolsum1l, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* ( - - - 0) */
+ } else {
+ __m64 tmp;
+
+ /* process the last column block */
+ tmp = _mm_load_si64((__m64 *)outptr0 + 1);
+ wk[2] = _mm_and_si64(masklast, tmp); /* ( - - - 7) */
+ tmp = _mm_load_si64((__m64 *)outptr1 + 1);
+ wk[3] = _mm_and_si64(masklast, tmp); /* ( - - - 7) */
+ }
+
+ /* process the upper row */
+      samp0123 = _mm_load_si64((__m64 *)outptr0);      /* ( 0 1 2 3) */
+      samp4567 = _mm_load_si64((__m64 *)outptr0 + 1);  /* ( 4 5 6 7) */
+      PROCESS_ROW(0, 2, PW_EIGHT, PW_SEVEN, 4)
+
+      /* process the lower row */
+      samp0123 = _mm_load_si64((__m64 *)outptr1);      /* ( 0 1 2 3) */
+      samp4567 = _mm_load_si64((__m64 *)outptr1 + 1);  /* ( 4 5 6 7) */
+      PROCESS_ROW(1, 2, PW_EIGHT, PW_SEVEN, 4)
+ }
+ }
+}
+
+
+void jsimd_h2v1_fancy_upsample_mmi(int max_v_samp_factor,
+ JDIMENSION downsampled_width,
+ JSAMPARRAY input_data,
+ JSAMPARRAY *output_data_ptr)
+{
+ JSAMPARRAY output_data = *output_data_ptr;
+ JSAMPROW inptr0, outptr0;
+ int inrow, incol, tmp, tmp1;
+ __m64 thisl, this, nextl, next;
+ __m64 mask0 = 0.0, masklast, samp0123, samp4567, wk[2], zero = 0.0;
+
+ mask0 = _mm_cmpeq_pi8(mask0, mask0);
+ masklast = _mm_slli_si64(mask0, (SIZEOF_MMWORD - 2) * BYTE_BIT);
+ mask0 = _mm_srli_si64(mask0, (SIZEOF_MMWORD - 2) * BYTE_BIT);
+
+ for (inrow = 0; inrow < max_v_samp_factor; inrow++) {
+
+ inptr0 = input_data[inrow];
+ outptr0 = output_data[inrow];
+
+ if (downsampled_width & 7) {
+ tmp = (downsampled_width - 1) * sizeof(JSAMPLE);
+ tmp1 = downsampled_width * sizeof(JSAMPLE);
+ asm(PTR_ADDU "$8, %1, %2\r\n"
+ "lb $9, ($8)\r\n"
+ PTR_ADDU "$8, %1, %3\r\n"
+ "sb $9, ($8)\r\n"
+ : "=m" (*inptr0)
+ : "r" (inptr0), "r" (tmp), "r" (tmp1)
+ : "$8", "$9"
+ );
+ }
+
+ /* process the first column block */
+ this = _mm_load_si64((__m64 *)inptr0); /* row[ 0][0] */
+ thisl = _mm_unpacklo_pi8(this, zero); /* row[ 0][0]( 0 1 2 3) */
+ wk[0] = _mm_and_si64(thisl, mask0); /* ( 0 - - -) */
+
+ for (incol = downsampled_width; incol > 0;
+ incol -= 8, inptr0 += 8, outptr0 += 16) {
+
+ if (incol > 8) {
+ /* process the next column block */
+ next = _mm_load_si64((__m64 *)inptr0 + 1); /* row[ 0][1] */
+ nextl = _mm_unpacklo_pi8(next, zero); /* row[ 0][1]( 0 1 2 3) */
+ wk[1] = _mm_slli_si64(nextl, (SIZEOF_MMWORD - 2) * BYTE_BIT); /* ( - - - 0) */
+ } else {
+ __m64 thish;
+
+ /* process the last column block */
+ this = _mm_load_si64((__m64 *)inptr0); /* row[ 0][0] */
+        thish = _mm_unpackhi_pi8(this, zero);  /* row[ 0][0]( 4 5 6 7) */
+ wk[1] = _mm_and_si64(masklast, thish); /* ( - - - 7) */
+ }
+
+ /* process the row */
+ this = _mm_load_si64((__m64 *)inptr0); /* row[ 0][0] */
+ samp0123 = _mm_unpacklo_pi8(this, zero); /* ( 0 1 2 3) */
+ samp4567 = _mm_unpackhi_pi8(this, zero); /* ( 4 5 6 7) */
+ PROCESS_ROW(0, 1, PW_ONE, PW_TWO, 2)
+ }
+ }
+}
diff --git a/media/libjpeg/simd/mips64/jfdctfst-mmi.c b/media/libjpeg/simd/mips64/jfdctfst-mmi.c
new file mode 100644
index 0000000000..f7caf09a88
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jfdctfst-mmi.c
@@ -0,0 +1,255 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2014, 2018-2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: LiuQingfa <liuqingfa-hf@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* FAST INTEGER FORWARD DCT */
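+
+/* (This is the AA&N fast DCT of libjpeg's jfdctfst.c: the multipliers are
+ * 8-bit fixed-point constants (CONST_BITS == 8), and the scaled outputs are
+ * compensated for during quantization.) */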
+
+#include "jsimd_mmi.h"
+
+
+#define CONST_BITS 8
+
+#define F_0_382 ((short)98) /* FIX(0.382683433) */
+#define F_0_541 ((short)139) /* FIX(0.541196100) */
+#define F_0_707 ((short)181) /* FIX(0.707106781) */
+#define F_1_306 ((short)334) /* FIX(1.306562965) */
+
+#define PRE_MULTIPLY_SCALE_BITS 2
+#define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)
+
+enum const_index {
+ index_PW_F0707,
+ index_PW_F0382,
+ index_PW_F0541,
+ index_PW_F1306
+};
+
+static uint64_t const_value[] = {
+ _uint64_set1_pi16(F_0_707),
+ _uint64_set1_pi16(F_0_382),
+ _uint64_set1_pi16(F_0_541),
+ _uint64_set1_pi16(F_1_306)
+};
+
+#define PW_F0707 get_const_value(index_PW_F0707)
+#define PW_F0382 get_const_value(index_PW_F0382)
+#define PW_F0541 get_const_value(index_PW_F0541)
+#define PW_F1306 get_const_value(index_PW_F1306)
+
+
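+/* Widen each 16-bit product to 32 bits by interleaving the low and high
+ * halves of the products, descale by CONST_BITS, and repack with signed
+ * saturation: out = (in * multiplier) >> 8 in each lane.
+ */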
+#define DO_FDCT_MULTIPLY(out, in, multiplier) { \
+ __m64 mulhi, mullo, mul12, mul34; \
+ \
+ mullo = _mm_mullo_pi16(in, multiplier); \
+ mulhi = _mm_mulhi_pi16(in, multiplier); \
+ mul12 = _mm_unpacklo_pi16(mullo, mulhi); \
+ mul34 = _mm_unpackhi_pi16(mullo, mulhi); \
+ mul12 = _mm_srai_pi32(mul12, CONST_BITS); \
+ mul34 = _mm_srai_pi32(mul34, CONST_BITS); \
+ out = _mm_packs_pi32(mul12, mul34); \
+}
+
+#define DO_FDCT_COMMON() { \
+ \
+ /* Even part */ \
+ \
+ tmp10 = _mm_add_pi16(tmp0, tmp3); \
+ tmp13 = _mm_sub_pi16(tmp0, tmp3); \
+ tmp11 = _mm_add_pi16(tmp1, tmp2); \
+ tmp12 = _mm_sub_pi16(tmp1, tmp2); \
+ \
+ out0 = _mm_add_pi16(tmp10, tmp11); \
+ out4 = _mm_sub_pi16(tmp10, tmp11); \
+ \
+ z1 = _mm_add_pi16(tmp12, tmp13); \
+ DO_FDCT_MULTIPLY(z1, z1, PW_F0707) \
+ \
+ out2 = _mm_add_pi16(tmp13, z1); \
+ out6 = _mm_sub_pi16(tmp13, z1); \
+ \
+ /* Odd part */ \
+ \
+ tmp10 = _mm_add_pi16(tmp4, tmp5); \
+ tmp11 = _mm_add_pi16(tmp5, tmp6); \
+ tmp12 = _mm_add_pi16(tmp6, tmp7); \
+ \
+ z5 = _mm_sub_pi16(tmp10, tmp12); \
+ DO_FDCT_MULTIPLY(z5, z5, PW_F0382) \
+ \
+ DO_FDCT_MULTIPLY(z2, tmp10, PW_F0541) \
+ z2 = _mm_add_pi16(z2, z5); \
+ \
+ DO_FDCT_MULTIPLY(z4, tmp12, PW_F1306) \
+ z4 = _mm_add_pi16(z4, z5); \
+ \
+ DO_FDCT_MULTIPLY(z3, tmp11, PW_F0707) \
+ \
+ z11 = _mm_add_pi16(tmp7, z3); \
+ z13 = _mm_sub_pi16(tmp7, z3); \
+ \
+ out5 = _mm_add_pi16(z13, z2); \
+ out3 = _mm_sub_pi16(z13, z2); \
+ out1 = _mm_add_pi16(z11, z4); \
+ out7 = _mm_sub_pi16(z11, z4); \
+}
+
+#define DO_FDCT_PASS1() { \
+ __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
+ __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
+ __m64 col0, col1, col2, col3, col4, col5, col6, col7; \
+ \
+ row0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]); /* (00 01 02 03) */ \
+ row0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4]); /* (04 05 06 07) */ \
+ row1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]); /* (10 11 12 13) */ \
+ row1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4]); /* (14 15 16 17) */ \
+ row2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]); /* (20 21 22 23) */ \
+ row2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4]); /* (24 25 26 27) */ \
+ row3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]); /* (30 31 32 33) */ \
+ row3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4]); /* (34 35 36 37) */ \
+ \
+ /* Transpose coefficients */ \
+ \
+ row23a = _mm_unpacklo_pi16(row2l, row3l); /* row23a=(20 30 21 31) */ \
+ row23b = _mm_unpackhi_pi16(row2l, row3l); /* row23b=(22 32 23 33) */ \
+ row23c = _mm_unpacklo_pi16(row2h, row3h); /* row23c=(24 34 25 35) */ \
+ row23d = _mm_unpackhi_pi16(row2h, row3h); /* row23d=(26 36 27 37) */ \
+ \
+ row01a = _mm_unpacklo_pi16(row0l, row1l); /* row01a=(00 10 01 11) */ \
+ row01b = _mm_unpackhi_pi16(row0l, row1l); /* row01b=(02 12 03 13) */ \
+ row01c = _mm_unpacklo_pi16(row0h, row1h); /* row01c=(04 14 05 15) */ \
+ row01d = _mm_unpackhi_pi16(row0h, row1h); /* row01d=(06 16 07 17) */ \
+ \
+ col0 = _mm_unpacklo_pi32(row01a, row23a); /* col0=(00 10 20 30) */ \
+ col1 = _mm_unpackhi_pi32(row01a, row23a); /* col1=(01 11 21 31) */ \
+ col6 = _mm_unpacklo_pi32(row01d, row23d); /* col6=(06 16 26 36) */ \
+ col7 = _mm_unpackhi_pi32(row01d, row23d); /* col7=(07 17 27 37) */ \
+ \
+ tmp6 = _mm_sub_pi16(col1, col6); /* tmp6=col1-col6 */ \
+ tmp7 = _mm_sub_pi16(col0, col7); /* tmp7=col0-col7 */ \
+ tmp1 = _mm_add_pi16(col1, col6); /* tmp1=col1+col6 */ \
+ tmp0 = _mm_add_pi16(col0, col7); /* tmp0=col0+col7 */ \
+ \
+ col2 = _mm_unpacklo_pi32(row01b, row23b); /* col2=(02 12 22 32) */ \
+ col3 = _mm_unpackhi_pi32(row01b, row23b); /* col3=(03 13 23 33) */ \
+ col4 = _mm_unpacklo_pi32(row01c, row23c); /* col4=(04 14 24 34) */ \
+ col5 = _mm_unpackhi_pi32(row01c, row23c); /* col5=(05 15 25 35) */ \
+ \
+ tmp3 = _mm_add_pi16(col3, col4); /* tmp3=col3+col4 */ \
+ tmp2 = _mm_add_pi16(col2, col5); /* tmp2=col2+col5 */ \
+ tmp4 = _mm_sub_pi16(col3, col4); /* tmp4=col3-col4 */ \
+ tmp5 = _mm_sub_pi16(col2, col5); /* tmp5=col2-col5 */ \
+ \
+ DO_FDCT_COMMON() \
+ \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4], out4); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4], out5); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4], out6); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4], out7); \
+}
+
+#define DO_FDCT_PASS2() { \
+ __m64 col0l, col0h, col1l, col1h, col2l, col2h, col3l, col3h; \
+ __m64 col01a, col01b, col01c, col01d, col23a, col23b, col23c, col23d; \
+ __m64 row0, row1, row2, row3, row4, row5, row6, row7; \
+ \
+ col0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]); /* (00 10 20 30) */ \
+ col1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]); /* (01 11 21 31) */ \
+ col2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]); /* (02 12 22 32) */ \
+ col3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]); /* (03 13 23 33) */ \
+ col0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 4]); /* (40 50 60 70) */ \
+ col1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 5]); /* (41 51 61 71) */ \
+ col2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 6]); /* (42 52 62 72) */ \
+ col3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 7]); /* (43 53 63 73) */ \
+ \
+ /* Transpose coefficients */ \
+ \
+ col23a = _mm_unpacklo_pi16(col2l, col3l); /* col23a=(02 03 12 13) */ \
+ col23b = _mm_unpackhi_pi16(col2l, col3l); /* col23b=(22 23 32 33) */ \
+ col23c = _mm_unpacklo_pi16(col2h, col3h); /* col23c=(42 43 52 53) */ \
+ col23d = _mm_unpackhi_pi16(col2h, col3h); /* col23d=(62 63 72 73) */ \
+ \
+ col01a = _mm_unpacklo_pi16(col0l, col1l); /* col01a=(00 01 10 11) */ \
+ col01b = _mm_unpackhi_pi16(col0l, col1l); /* col01b=(20 21 30 31) */ \
+ col01c = _mm_unpacklo_pi16(col0h, col1h); /* col01c=(40 41 50 51) */ \
+ col01d = _mm_unpackhi_pi16(col0h, col1h); /* col01d=(60 61 70 71) */ \
+ \
+ row0 = _mm_unpacklo_pi32(col01a, col23a); /* row0=(00 01 02 03) */ \
+ row1 = _mm_unpackhi_pi32(col01a, col23a); /* row1=(10 11 12 13) */ \
+ row6 = _mm_unpacklo_pi32(col01d, col23d); /* row6=(60 61 62 63) */ \
+ row7 = _mm_unpackhi_pi32(col01d, col23d); /* row7=(70 71 72 73) */ \
+ \
+ tmp6 = _mm_sub_pi16(row1, row6); /* tmp6=row1-row6 */ \
+ tmp7 = _mm_sub_pi16(row0, row7); /* tmp7=row0-row7 */ \
+ tmp1 = _mm_add_pi16(row1, row6); /* tmp1=row1+row6 */ \
+ tmp0 = _mm_add_pi16(row0, row7); /* tmp0=row0+row7 */ \
+ \
+ row2 = _mm_unpacklo_pi32(col01b, col23b); /* row2=(20 21 22 23) */ \
+ row3 = _mm_unpackhi_pi32(col01b, col23b); /* row3=(30 31 32 33) */ \
+ row4 = _mm_unpacklo_pi32(col01c, col23c); /* row4=(40 41 42 43) */ \
+ row5 = _mm_unpackhi_pi32(col01c, col23c); /* row5=(50 51 52 53) */ \
+ \
+ tmp3 = _mm_add_pi16(row3, row4); /* tmp3=row3+row4 */ \
+ tmp2 = _mm_add_pi16(row2, row5); /* tmp2=row2+row5 */ \
+ tmp4 = _mm_sub_pi16(row3, row4); /* tmp4=row3-row4 */ \
+ tmp5 = _mm_sub_pi16(row2, row5); /* tmp5=row2-row5 */ \
+ \
+ DO_FDCT_COMMON() \
+ \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 4], out4); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 5], out5); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 6], out6); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 7], out7); \
+}
+
+void jsimd_fdct_ifast_mmi(DCTELEM *data)
+{
+ __m64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m64 out0, out1, out2, out3, out4, out5, out6, out7;
+ __m64 tmp10, tmp11, tmp12, tmp13, z1, z2, z3, z4, z5, z11, z13;
+ DCTELEM *dataptr = data;
+
+ /* Pass 1: process rows. */
+
+ DO_FDCT_PASS1()
+ dataptr += DCTSIZE * 4;
+ DO_FDCT_PASS1()
+
+ /* Pass 2: process columns. */
+
+ dataptr = data;
+ DO_FDCT_PASS2()
+ dataptr += 4;
+ DO_FDCT_PASS2()
+}
diff --git a/media/libjpeg/simd/mips64/jfdctint-mmi.c b/media/libjpeg/simd/mips64/jfdctint-mmi.c
new file mode 100644
index 0000000000..7f4dfe9123
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jfdctint-mmi.c
@@ -0,0 +1,398 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2014, 2018, 2020, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* ACCURATE INTEGER FORWARD DCT */
+
+#include "jsimd_mmi.h"
+
+
+#define CONST_BITS 13
+#define PASS1_BITS 2
+#define DESCALE_P1 (CONST_BITS - PASS1_BITS)
+#define DESCALE_P2 (CONST_BITS + PASS1_BITS)
+
+#define FIX_0_298 ((short)2446) /* FIX(0.298631336) */
+#define FIX_0_390 ((short)3196) /* FIX(0.390180644) */
+#define FIX_0_541 ((short)4433) /* FIX(0.541196100) */
+#define FIX_0_765 ((short)6270) /* FIX(0.765366865) */
+#define FIX_0_899 ((short)7373) /* FIX(0.899976223) */
+#define FIX_1_175 ((short)9633) /* FIX(1.175875602) */
+#define FIX_1_501 ((short)12299) /* FIX(1.501321110) */
+#define FIX_1_847 ((short)15137) /* FIX(1.847759065) */
+#define FIX_1_961 ((short)16069) /* FIX(1.961570560) */
+#define FIX_2_053 ((short)16819) /* FIX(2.053119869) */
+#define FIX_2_562 ((short)20995) /* FIX(2.562915447) */
+#define FIX_3_072 ((short)25172) /* FIX(3.072711026) */
+
+enum const_index {
+ index_PW_F130_F054,
+ index_PW_F054_MF130,
+ index_PW_MF078_F117,
+ index_PW_F117_F078,
+ index_PW_MF060_MF089,
+ index_PW_MF089_F060,
+ index_PW_MF050_MF256,
+ index_PW_MF256_F050,
+ index_PD_DESCALE_P1,
+ index_PD_DESCALE_P2,
+ index_PW_DESCALE_P2X
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi16(FIX_0_541, (FIX_0_541 + FIX_0_765),
+ FIX_0_541, (FIX_0_541 + FIX_0_765)),
+ _uint64_set_pi16((FIX_0_541 - FIX_1_847), FIX_0_541,
+ (FIX_0_541 - FIX_1_847), FIX_0_541),
+ _uint64_set_pi16(FIX_1_175, (FIX_1_175 - FIX_1_961),
+ FIX_1_175, (FIX_1_175 - FIX_1_961)),
+ _uint64_set_pi16((FIX_1_175 - FIX_0_390), FIX_1_175,
+ (FIX_1_175 - FIX_0_390), FIX_1_175),
+ _uint64_set_pi16(-FIX_0_899, (FIX_0_298 - FIX_0_899),
+ -FIX_0_899, (FIX_0_298 - FIX_0_899)),
+ _uint64_set_pi16((FIX_1_501 - FIX_0_899), -FIX_0_899,
+ (FIX_1_501 - FIX_0_899), -FIX_0_899),
+ _uint64_set_pi16(-FIX_2_562, (FIX_2_053 - FIX_2_562),
+ -FIX_2_562, (FIX_2_053 - FIX_2_562)),
+ _uint64_set_pi16((FIX_3_072 - FIX_2_562), -FIX_2_562,
+ (FIX_3_072 - FIX_2_562), -FIX_2_562),
+ _uint64_set_pi32((1 << (DESCALE_P1 - 1)), (1 << (DESCALE_P1 - 1))),
+ _uint64_set_pi32((1 << (DESCALE_P2 - 1)), (1 << (DESCALE_P2 - 1))),
+ _uint64_set_pi16((1 << (PASS1_BITS - 1)), (1 << (PASS1_BITS - 1)),
+ (1 << (PASS1_BITS - 1)), (1 << (PASS1_BITS - 1)))
+};
+
+#define PW_F130_F054 get_const_value(index_PW_F130_F054)
+#define PW_F054_MF130 get_const_value(index_PW_F054_MF130)
+#define PW_MF078_F117 get_const_value(index_PW_MF078_F117)
+#define PW_F117_F078 get_const_value(index_PW_F117_F078)
+#define PW_MF060_MF089 get_const_value(index_PW_MF060_MF089)
+#define PW_MF089_F060 get_const_value(index_PW_MF089_F060)
+#define PW_MF050_MF256 get_const_value(index_PW_MF050_MF256)
+#define PW_MF256_F050 get_const_value(index_PW_MF256_F050)
+#define PD_DESCALE_P1 get_const_value(index_PD_DESCALE_P1)
+#define PD_DESCALE_P2 get_const_value(index_PD_DESCALE_P2)
+#define PW_DESCALE_P2X get_const_value(index_PW_DESCALE_P2X)
+
+
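+/* As in jfdctint.c, pass 1 leaves the coefficients scaled up by
+ * 2^PASS1_BITS so that pass 2 can descale by CONST_BITS + PASS1_BITS with a
+ * single arithmetic shift; the PASS argument selects DESCALE_P1 or
+ * DESCALE_P2 accordingly.
+ */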
+#define DO_FDCT_COMMON(PASS) { \
+ __m64 tmp1312l, tmp1312h, tmp47l, tmp47h, tmp4l, tmp4h, tmp7l, tmp7h; \
+ __m64 tmp56l, tmp56h, tmp5l, tmp5h, tmp6l, tmp6h; \
+ __m64 out1l, out1h, out2l, out2h, out3l, out3h; \
+ __m64 out5l, out5h, out6l, out6h, out7l, out7h; \
+ __m64 z34l, z34h, z3l, z3h, z4l, z4h, z3, z4; \
+ \
+ /* (Original) \
+ * z1 = (tmp12 + tmp13) * 0.541196100; \
+ * out2 = z1 + tmp13 * 0.765366865; \
+ * out6 = z1 + tmp12 * -1.847759065; \
+ * \
+ * (This implementation) \
+ * out2 = tmp13 * (0.541196100 + 0.765366865) + tmp12 * 0.541196100; \
+ * out6 = tmp13 * 0.541196100 + tmp12 * (0.541196100 - 1.847759065); \
+ */ \
+ \
+ tmp1312l = _mm_unpacklo_pi16(tmp13, tmp12); \
+ tmp1312h = _mm_unpackhi_pi16(tmp13, tmp12); \
+ \
+ out2l = _mm_madd_pi16(tmp1312l, PW_F130_F054); \
+ out2h = _mm_madd_pi16(tmp1312h, PW_F130_F054); \
+ out6l = _mm_madd_pi16(tmp1312l, PW_F054_MF130); \
+ out6h = _mm_madd_pi16(tmp1312h, PW_F054_MF130); \
+ \
+ out2l = _mm_add_pi32(out2l, PD_DESCALE_P##PASS); \
+ out2h = _mm_add_pi32(out2h, PD_DESCALE_P##PASS); \
+ out2l = _mm_srai_pi32(out2l, DESCALE_P##PASS); \
+ out2h = _mm_srai_pi32(out2h, DESCALE_P##PASS); \
+ \
+ out6l = _mm_add_pi32(out6l, PD_DESCALE_P##PASS); \
+ out6h = _mm_add_pi32(out6h, PD_DESCALE_P##PASS); \
+ out6l = _mm_srai_pi32(out6l, DESCALE_P##PASS); \
+ out6h = _mm_srai_pi32(out6h, DESCALE_P##PASS); \
+ \
+ out2 = _mm_packs_pi32(out2l, out2h); \
+ out6 = _mm_packs_pi32(out6l, out6h); \
+ \
+ /* Odd part */ \
+ \
+ z3 = _mm_add_pi16(tmp4, tmp6); \
+ z4 = _mm_add_pi16(tmp5, tmp7); \
+ \
+ /* (Original) \
+ * z5 = (z3 + z4) * 1.175875602; \
+ * z3 = z3 * -1.961570560; z4 = z4 * -0.390180644; \
+ * z3 += z5; z4 += z5; \
+ * \
+ * (This implementation) \
+ * z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602; \
+ * z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644); \
+ */ \
+ \
+ z34l = _mm_unpacklo_pi16(z3, z4); \
+ z34h = _mm_unpackhi_pi16(z3, z4); \
+ z3l = _mm_madd_pi16(z34l, PW_MF078_F117); \
+ z3h = _mm_madd_pi16(z34h, PW_MF078_F117); \
+ z4l = _mm_madd_pi16(z34l, PW_F117_F078); \
+ z4h = _mm_madd_pi16(z34h, PW_F117_F078); \
+ \
+ /* (Original) \
+ * z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; \
+ * tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869; \
+ * tmp6 = tmp6 * 3.072711026; tmp7 = tmp7 * 1.501321110; \
+ * z1 = z1 * -0.899976223; z2 = z2 * -2.562915447; \
+ * out7 = tmp4 + z1 + z3; out5 = tmp5 + z2 + z4; \
+ * out3 = tmp6 + z2 + z3; out1 = tmp7 + z1 + z4; \
+ * \
+ * (This implementation) \
+ * tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223; \
+ * tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447; \
+ * tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447); \
+ * tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223); \
+ * out7 = tmp4 + z3; out5 = tmp5 + z4; \
+ * out3 = tmp6 + z3; out1 = tmp7 + z4; \
+ */ \
+ \
+ tmp47l = _mm_unpacklo_pi16(tmp4, tmp7); \
+ tmp47h = _mm_unpackhi_pi16(tmp4, tmp7); \
+ \
+ tmp4l = _mm_madd_pi16(tmp47l, PW_MF060_MF089); \
+ tmp4h = _mm_madd_pi16(tmp47h, PW_MF060_MF089); \
+ tmp7l = _mm_madd_pi16(tmp47l, PW_MF089_F060); \
+ tmp7h = _mm_madd_pi16(tmp47h, PW_MF089_F060); \
+ \
+ out7l = _mm_add_pi32(tmp4l, z3l); \
+ out7h = _mm_add_pi32(tmp4h, z3h); \
+ out1l = _mm_add_pi32(tmp7l, z4l); \
+ out1h = _mm_add_pi32(tmp7h, z4h); \
+ \
+ out7l = _mm_add_pi32(out7l, PD_DESCALE_P##PASS); \
+ out7h = _mm_add_pi32(out7h, PD_DESCALE_P##PASS); \
+ out7l = _mm_srai_pi32(out7l, DESCALE_P##PASS); \
+ out7h = _mm_srai_pi32(out7h, DESCALE_P##PASS); \
+ \
+ out1l = _mm_add_pi32(out1l, PD_DESCALE_P##PASS); \
+ out1h = _mm_add_pi32(out1h, PD_DESCALE_P##PASS); \
+ out1l = _mm_srai_pi32(out1l, DESCALE_P##PASS); \
+ out1h = _mm_srai_pi32(out1h, DESCALE_P##PASS); \
+ \
+ out7 = _mm_packs_pi32(out7l, out7h); \
+ out1 = _mm_packs_pi32(out1l, out1h); \
+ \
+ tmp56l = _mm_unpacklo_pi16(tmp5, tmp6); \
+ tmp56h = _mm_unpackhi_pi16(tmp5, tmp6); \
+ \
+ tmp5l = _mm_madd_pi16(tmp56l, PW_MF050_MF256); \
+ tmp5h = _mm_madd_pi16(tmp56h, PW_MF050_MF256); \
+ tmp6l = _mm_madd_pi16(tmp56l, PW_MF256_F050); \
+ tmp6h = _mm_madd_pi16(tmp56h, PW_MF256_F050); \
+ \
+ out5l = _mm_add_pi32(tmp5l, z4l); \
+ out5h = _mm_add_pi32(tmp5h, z4h); \
+ out3l = _mm_add_pi32(tmp6l, z3l); \
+ out3h = _mm_add_pi32(tmp6h, z3h); \
+ \
+ out5l = _mm_add_pi32(out5l, PD_DESCALE_P##PASS); \
+ out5h = _mm_add_pi32(out5h, PD_DESCALE_P##PASS); \
+ out5l = _mm_srai_pi32(out5l, DESCALE_P##PASS); \
+ out5h = _mm_srai_pi32(out5h, DESCALE_P##PASS); \
+ \
+ out3l = _mm_add_pi32(out3l, PD_DESCALE_P##PASS); \
+ out3h = _mm_add_pi32(out3h, PD_DESCALE_P##PASS); \
+ out3l = _mm_srai_pi32(out3l, DESCALE_P##PASS); \
+ out3h = _mm_srai_pi32(out3h, DESCALE_P##PASS); \
+ \
+ out5 = _mm_packs_pi32(out5l, out5h); \
+ out3 = _mm_packs_pi32(out3l, out3h); \
+}
+
+#define DO_FDCT_PASS1() { \
+ __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
+ __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
+ __m64 col0, col1, col2, col3, col4, col5, col6, col7; \
+ __m64 tmp10, tmp11; \
+ \
+ row0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]); /* (00 01 02 03) */ \
+ row0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4]); /* (04 05 06 07) */ \
+ row1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]); /* (10 11 12 13) */ \
+ row1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4]); /* (14 15 16 17) */ \
+ row2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]); /* (20 21 22 23) */ \
+ row2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4]); /* (24 25 26 27) */ \
+ row3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]); /* (30 31 32 33) */ \
+ row3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4]); /* (34 35 36 37) */ \
+ \
+ /* Transpose coefficients */ \
+ \
+ row23a = _mm_unpacklo_pi16(row2l, row3l); /* row23a=(20 30 21 31) */ \
+ row23b = _mm_unpackhi_pi16(row2l, row3l); /* row23b=(22 32 23 33) */ \
+ row23c = _mm_unpacklo_pi16(row2h, row3h); /* row23c=(24 34 25 35) */ \
+ row23d = _mm_unpackhi_pi16(row2h, row3h); /* row23d=(26 36 27 37) */ \
+ \
+ row01a = _mm_unpacklo_pi16(row0l, row1l); /* row01a=(00 10 01 11) */ \
+ row01b = _mm_unpackhi_pi16(row0l, row1l); /* row01b=(02 12 03 13) */ \
+ row01c = _mm_unpacklo_pi16(row0h, row1h); /* row01c=(04 14 05 15) */ \
+ row01d = _mm_unpackhi_pi16(row0h, row1h); /* row01d=(06 16 07 17) */ \
+ \
+ col0 = _mm_unpacklo_pi32(row01a, row23a); /* col0=(00 10 20 30) */ \
+ col1 = _mm_unpackhi_pi32(row01a, row23a); /* col1=(01 11 21 31) */ \
+ col6 = _mm_unpacklo_pi32(row01d, row23d); /* col6=(06 16 26 36) */ \
+ col7 = _mm_unpackhi_pi32(row01d, row23d); /* col7=(07 17 27 37) */ \
+ \
+ tmp6 = _mm_sub_pi16(col1, col6); /* tmp6=col1-col6 */ \
+ tmp7 = _mm_sub_pi16(col0, col7); /* tmp7=col0-col7 */ \
+ tmp1 = _mm_add_pi16(col1, col6); /* tmp1=col1+col6 */ \
+ tmp0 = _mm_add_pi16(col0, col7); /* tmp0=col0+col7 */ \
+ \
+ col2 = _mm_unpacklo_pi32(row01b, row23b); /* col2=(02 12 22 32) */ \
+ col3 = _mm_unpackhi_pi32(row01b, row23b); /* col3=(03 13 23 33) */ \
+ col4 = _mm_unpacklo_pi32(row01c, row23c); /* col4=(04 14 24 34) */ \
+ col5 = _mm_unpackhi_pi32(row01c, row23c); /* col5=(05 15 25 35) */ \
+ \
+ tmp3 = _mm_add_pi16(col3, col4); /* tmp3=col3+col4 */ \
+ tmp2 = _mm_add_pi16(col2, col5); /* tmp2=col2+col5 */ \
+ tmp4 = _mm_sub_pi16(col3, col4); /* tmp4=col3-col4 */ \
+ tmp5 = _mm_sub_pi16(col2, col5); /* tmp5=col2-col5 */ \
+ \
+ /* Even part */ \
+ \
+ tmp10 = _mm_add_pi16(tmp0, tmp3); /* tmp10=tmp0+tmp3 */ \
+ tmp13 = _mm_sub_pi16(tmp0, tmp3); /* tmp13=tmp0-tmp3 */ \
+ tmp11 = _mm_add_pi16(tmp1, tmp2); /* tmp11=tmp1+tmp2 */ \
+ tmp12 = _mm_sub_pi16(tmp1, tmp2); /* tmp12=tmp1-tmp2 */ \
+ \
+ out0 = _mm_add_pi16(tmp10, tmp11); /* out0=tmp10+tmp11 */ \
+ out4 = _mm_sub_pi16(tmp10, tmp11); /* out4=tmp10-tmp11 */ \
+ out0 = _mm_slli_pi16(out0, PASS1_BITS); \
+ out4 = _mm_slli_pi16(out4, PASS1_BITS); \
+ \
+ DO_FDCT_COMMON(1) \
+ \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0 + 4], out4); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1 + 4], out5); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2 + 4], out6); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3 + 4], out7); \
+}
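+
+/* Note that pass 1 leaves the block transposed in memory: each stored
+ * quadword holds one DCT coefficient from four consecutive rows (half a
+ * column of the coefficient matrix), so DO_FDCT_PASS2() can fetch columns
+ * with plain quadword loads before its own 4x4 transpose. */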
+
+#define DO_FDCT_PASS2() { \
+ __m64 col0l, col0h, col1l, col1h, col2l, col2h, col3l, col3h; \
+ __m64 col01a, col01b, col01c, col01d, col23a, col23b, col23c, col23d; \
+ __m64 row0, row1, row2, row3, row4, row5, row6, row7; \
+ __m64 tmp10, tmp11; \
+ \
+ col0l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 0]); /* (00 10 20 30) */ \
+ col1l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 1]); /* (01 11 21 31) */ \
+ col2l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 2]); /* (02 12 22 32) */ \
+ col3l = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 3]); /* (03 13 23 33) */ \
+ col0h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 4]); /* (40 50 60 70) */ \
+ col1h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 5]); /* (41 51 61 71) */ \
+ col2h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 6]); /* (42 52 62 72) */ \
+ col3h = _mm_load_si64((__m64 *)&dataptr[DCTSIZE * 7]); /* (43 53 63 73) */ \
+ \
+ /* Transpose coefficients */ \
+ \
+ col23a = _mm_unpacklo_pi16(col2l, col3l); /* col23a=(02 03 12 13) */ \
+ col23b = _mm_unpackhi_pi16(col2l, col3l); /* col23b=(22 23 32 33) */ \
+ col23c = _mm_unpacklo_pi16(col2h, col3h); /* col23c=(42 43 52 53) */ \
+ col23d = _mm_unpackhi_pi16(col2h, col3h); /* col23d=(62 63 72 73) */ \
+ \
+ col01a = _mm_unpacklo_pi16(col0l, col1l); /* col01a=(00 01 10 11) */ \
+ col01b = _mm_unpackhi_pi16(col0l, col1l); /* col01b=(20 21 30 31) */ \
+ col01c = _mm_unpacklo_pi16(col0h, col1h); /* col01c=(40 41 50 51) */ \
+ col01d = _mm_unpackhi_pi16(col0h, col1h); /* col01d=(60 61 70 71) */ \
+ \
+ row0 = _mm_unpacklo_pi32(col01a, col23a); /* row0=(00 01 02 03) */ \
+ row1 = _mm_unpackhi_pi32(col01a, col23a); /* row1=(10 11 12 13) */ \
+ row6 = _mm_unpacklo_pi32(col01d, col23d); /* row6=(60 61 62 63) */ \
+ row7 = _mm_unpackhi_pi32(col01d, col23d); /* row7=(70 71 72 73) */ \
+ \
+ tmp6 = _mm_sub_pi16(row1, row6); /* tmp6=row1-row6 */ \
+ tmp7 = _mm_sub_pi16(row0, row7); /* tmp7=row0-row7 */ \
+ tmp1 = _mm_add_pi16(row1, row6); /* tmp1=row1+row6 */ \
+ tmp0 = _mm_add_pi16(row0, row7); /* tmp0=row0+row7 */ \
+ \
+ row2 = _mm_unpacklo_pi32(col01b, col23b); /* row2=(20 21 22 23) */ \
+ row3 = _mm_unpackhi_pi32(col01b, col23b); /* row3=(30 31 32 33) */ \
+ row4 = _mm_unpacklo_pi32(col01c, col23c); /* row4=(40 41 42 43) */ \
+ row5 = _mm_unpackhi_pi32(col01c, col23c); /* row5=(50 51 52 53) */ \
+ \
+ tmp3 = _mm_add_pi16(row3, row4); /* tmp3=row3+row4 */ \
+ tmp2 = _mm_add_pi16(row2, row5); /* tmp2=row2+row5 */ \
+ tmp4 = _mm_sub_pi16(row3, row4); /* tmp4=row3-row4 */ \
+ tmp5 = _mm_sub_pi16(row2, row5); /* tmp5=row2-row5 */ \
+ \
+ /* Even part */ \
+ \
+ tmp10 = _mm_add_pi16(tmp0, tmp3); /* tmp10=tmp0+tmp3 */ \
+ tmp13 = _mm_sub_pi16(tmp0, tmp3); /* tmp13=tmp0-tmp3 */ \
+ tmp11 = _mm_add_pi16(tmp1, tmp2); /* tmp11=tmp1+tmp2 */ \
+ tmp12 = _mm_sub_pi16(tmp1, tmp2); /* tmp12=tmp1-tmp2 */ \
+ \
+ out0 = _mm_add_pi16(tmp10, tmp11); /* out0=tmp10+tmp11 */ \
+ out4 = _mm_sub_pi16(tmp10, tmp11); /* out4=tmp10-tmp11 */ \
+ \
+ out0 = _mm_add_pi16(out0, PW_DESCALE_P2X); \
+ out4 = _mm_add_pi16(out4, PW_DESCALE_P2X); \
+ out0 = _mm_srai_pi16(out0, PASS1_BITS); \
+ out4 = _mm_srai_pi16(out4, PASS1_BITS); \
+ \
+ DO_FDCT_COMMON(2) \
+ \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 0], out0); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 1], out1); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 2], out2); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 3], out3); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 4], out4); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 5], out5); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 6], out6); \
+ _mm_store_si64((__m64 *)&dataptr[DCTSIZE * 7], out7); \
+}
+
+void jsimd_fdct_islow_mmi(DCTELEM *data)
+{
+ __m64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m64 out0, out1, out2, out3, out4, out5, out6, out7;
+ __m64 tmp12, tmp13;
+ DCTELEM *dataptr = data;
+
+ /* Pass 1: process rows. */
+
+ DO_FDCT_PASS1()
+ dataptr += DCTSIZE * 4;
+ DO_FDCT_PASS1()
+
+ /* Pass 2: process columns. */
+
+ dataptr = data;
+ DO_FDCT_PASS2()
+ dataptr += 4;
+ DO_FDCT_PASS2()
+}
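
The PD_DESCALE_P* additions followed by arithmetic shifts in DO_FDCT_COMMON() implement libjpeg's DESCALE() rounding: adding 2^(n-1) before an n-bit right shift rounds to nearest instead of always rounding down. A minimal standalone sketch of the idiom (not part of the patch), assuming the usual jfdctint convention of CONST_BITS = 13 and PASS1_BITS = 2, so DESCALE_P1 == 11:

    #include <stdio.h>

    /* Round-to-nearest right shift, the scalar equivalent of the
       PD_DESCALE add + srai pairs above */
    #define DESCALE(x, n)  (((x) + (1L << ((n) - 1))) >> (n))

    int main(void)
    {
      long product = 5L * 4433;  /* 5 * FIX(0.541196100) at CONST_BITS = 13 */
      printf("truncate: %ld\n", product >> 11);        /* prints 10 */
      printf("descale:  %ld\n", DESCALE(product, 11)); /* prints 11 */
      return 0;
    }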
diff --git a/media/libjpeg/simd/mips64/jidctfst-mmi.c b/media/libjpeg/simd/mips64/jidctfst-mmi.c
new file mode 100644
index 0000000000..503bb35a3c
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jidctfst-mmi.c
@@ -0,0 +1,395 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2014-2015, 2018-2019, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: LiuQingfa <liuqingfa-hf@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* FAST INTEGER INVERSE DCT */
+
+#include "jsimd_mmi.h"
+
+
+#define CONST_BITS 8
+#define PASS1_BITS 2
+
+#define FIX_1_082 ((short)277) /* FIX(1.082392200) */
+#define FIX_1_414 ((short)362) /* FIX(1.414213562) */
+#define FIX_1_847 ((short)473) /* FIX(1.847759065) */
+#define FIX_2_613 ((short)669) /* FIX(2.613125930) */
+#define FIX_1_613 ((short)(FIX_2_613 - 256 * 3)) /* FIX(2.613125930) - FIX(3) */
+
+#define PRE_MULTIPLY_SCALE_BITS 2
+#define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)
+
+enum const_index {
+ index_PW_F1082,
+ index_PW_F1414,
+ index_PW_F1847,
+ index_PW_MF1613,
+ index_PB_CENTERJSAMP
+};
+
+static uint64_t const_value[] = {
+ _uint64_set1_pi16(FIX_1_082 << CONST_SHIFT),
+ _uint64_set1_pi16(FIX_1_414 << CONST_SHIFT),
+ _uint64_set1_pi16(FIX_1_847 << CONST_SHIFT),
+ _uint64_set1_pi16(-FIX_1_613 << CONST_SHIFT),
+ _uint64_set1_pi8(CENTERJSAMPLE)
+};
+
+#define PW_F1414 get_const_value(index_PW_F1414)
+#define PW_F1847 get_const_value(index_PW_F1847)
+#define PW_MF1613 get_const_value(index_PW_MF1613)
+#define PW_F1082 get_const_value(index_PW_F1082)
+#define PB_CENTERJSAMP get_const_value(index_PB_CENTERJSAMP)
+
+
+#define test_m32_zero(mm32) (!(*(uint32_t *)&mm32))
+#define test_m64_zero(mm64) (!(*(uint64_t *)&mm64))
+
+
+#define DO_IDCT_COMMON() { \
+ tmp7 = _mm_add_pi16(z11, z13); \
+ \
+ tmp11 = _mm_sub_pi16(z11, z13); \
+ tmp11 = _mm_slli_pi16(tmp11, PRE_MULTIPLY_SCALE_BITS); \
+ tmp11 = _mm_mulhi_pi16(tmp11, PW_F1414); \
+ \
+ tmp10 = _mm_slli_pi16(z12, PRE_MULTIPLY_SCALE_BITS); \
+ tmp12 = _mm_slli_pi16(z10, PRE_MULTIPLY_SCALE_BITS); \
+ \
+ /* To avoid overflow... \
+ * \
+ * (Original) \
+ * tmp12 = -2.613125930 * z10 + z5; \
+ * \
+ * (This implementation) \
+   * tmp12 = (3 - 2.613125930) * z10 - 3 * z10 + z5; \
+   *       = 0.386874070 * z10 - z10 - z10 - z10 + z5; \
+ */ \
+ \
+ z5 = _mm_add_pi16(tmp10, tmp12); \
+ z5 = _mm_mulhi_pi16(z5, PW_F1847); \
+ \
+ tmp10 = _mm_mulhi_pi16(tmp10, PW_F1082); \
+ tmp10 = _mm_sub_pi16(tmp10, z5); \
+ tmp12 = _mm_mulhi_pi16(tmp12, PW_MF1613); \
+ tmp12 = _mm_sub_pi16(tmp12, z10); \
+ tmp12 = _mm_sub_pi16(tmp12, z10); \
+ tmp12 = _mm_sub_pi16(tmp12, z10); \
+ tmp12 = _mm_add_pi16(tmp12, z5); \
+ \
+ /* Final output stage */ \
+ \
+ tmp6 = _mm_sub_pi16(tmp12, tmp7); \
+ tmp5 = _mm_sub_pi16(tmp11, tmp6); \
+ tmp4 = _mm_add_pi16(tmp10, tmp5); \
+ \
+ out0 = _mm_add_pi16(tmp0, tmp7); \
+ out7 = _mm_sub_pi16(tmp0, tmp7); \
+ out1 = _mm_add_pi16(tmp1, tmp6); \
+ out6 = _mm_sub_pi16(tmp1, tmp6); \
+ \
+ out2 = _mm_add_pi16(tmp2, tmp5); \
+ out5 = _mm_sub_pi16(tmp2, tmp5); \
+ out4 = _mm_add_pi16(tmp3, tmp4); \
+ out3 = _mm_sub_pi16(tmp3, tmp4); \
+}
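+
+/* Fixed-point multiplies in this file use the idiom
+ * _mm_mulhi_pi16(x << PRE_MULTIPLY_SCALE_BITS, C << CONST_SHIFT):
+ * (x * 4) * (C * 64) >> 16 == (x * C) >> CONST_BITS.  The 8 bits of
+ * headroom are split between operand and constant so that both stay
+ * within 16-bit range. */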
+
+#define DO_IDCT_PASS1(iter) { \
+ __m64 col0l, col1l, col2l, col3l, col4l, col5l, col6l, col7l; \
+ __m64 quant0l, quant1l, quant2l, quant3l; \
+ __m64 quant4l, quant5l, quant6l, quant7l; \
+ __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
+ __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
+ __m32 col0a, col1a, mm0; \
+ \
+ col0a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 1]); \
+ col1a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 2]); \
+ mm0 = _mm_or_si32(col0a, col1a); \
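+  /* Cheap pre-test: only when the first two coefficients of rows 1 and 2 \
+   * are all zero is the full all-AC-rows test below worth running. */ \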
+ \
+ if (test_m32_zero(mm0)) { \
+ __m64 mm1, mm2; \
+ \
+ col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]); \
+ col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]); \
+ col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]); \
+ col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]); \
+ col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]); \
+ col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]); \
+ col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]); \
+ col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]); \
+ \
+ mm1 = _mm_or_si64(col1l, col3l); \
+ mm2 = _mm_or_si64(col2l, col4l); \
+ mm1 = _mm_or_si64(mm1, col5l); \
+ mm2 = _mm_or_si64(mm2, col6l); \
+ mm1 = _mm_or_si64(mm1, col7l); \
+ mm1 = _mm_or_si64(mm1, mm2); \
+ \
+ if (test_m64_zero(mm1)) { \
+ __m64 dcval, dcvall, dcvalh, row0, row1, row2, row3; \
+ \
+ /* AC terms all zero */ \
+ \
+ quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
+ \
+ dcval = _mm_mullo_pi16(col0l, quant0l); /* dcval=(00 10 20 30) */ \
+ \
+ dcvall = _mm_unpacklo_pi16(dcval, dcval); /* dcvall=(00 00 10 10) */ \
+ dcvalh = _mm_unpackhi_pi16(dcval, dcval); /* dcvalh=(20 20 30 30) */ \
+ \
+ row0 = _mm_unpacklo_pi32(dcvall, dcvall); /* row0=(00 00 00 00) */ \
+ row1 = _mm_unpackhi_pi32(dcvall, dcvall); /* row1=(10 10 10 10) */ \
+ row2 = _mm_unpacklo_pi32(dcvalh, dcvalh); /* row2=(20 20 20 20) */ \
+ row3 = _mm_unpackhi_pi32(dcvalh, dcvalh); /* row3=(30 30 30 30) */ \
+ \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3); \
+ \
+ goto nextcolumn##iter; \
+ } \
+ } \
+ \
+ /* Even part */ \
+ \
+ col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]); /* (00 10 20 30) */ \
+ col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]); /* (02 12 22 32) */ \
+ col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]); /* (04 14 24 34) */ \
+ col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]); /* (06 16 26 36) */ \
+ \
+ quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
+ quant2l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 2]); \
+ quant4l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 4]); \
+ quant6l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 6]); \
+ \
+ tmp0 = _mm_mullo_pi16(col0l, quant0l); \
+ tmp1 = _mm_mullo_pi16(col2l, quant2l); \
+ tmp2 = _mm_mullo_pi16(col4l, quant4l); \
+ tmp3 = _mm_mullo_pi16(col6l, quant6l); \
+ \
+ tmp10 = _mm_add_pi16(tmp0, tmp2); \
+ tmp11 = _mm_sub_pi16(tmp0, tmp2); \
+ tmp13 = _mm_add_pi16(tmp1, tmp3); \
+ \
+ tmp12 = _mm_sub_pi16(tmp1, tmp3); \
+ tmp12 = _mm_slli_pi16(tmp12, PRE_MULTIPLY_SCALE_BITS); \
+ tmp12 = _mm_mulhi_pi16(tmp12, PW_F1414); \
+ tmp12 = _mm_sub_pi16(tmp12, tmp13); \
+ \
+ tmp0 = _mm_add_pi16(tmp10, tmp13); \
+ tmp3 = _mm_sub_pi16(tmp10, tmp13); \
+ tmp1 = _mm_add_pi16(tmp11, tmp12); \
+ tmp2 = _mm_sub_pi16(tmp11, tmp12); \
+ \
+ /* Odd part */ \
+ \
+ col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]); /* (01 11 21 31) */ \
+ col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]); /* (03 13 23 33) */ \
+ col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]); /* (05 15 25 35) */ \
+ col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]); /* (07 17 27 37) */ \
+ \
+ quant1l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 1]); \
+ quant3l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 3]); \
+ quant5l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 5]); \
+ quant7l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 7]); \
+ \
+ tmp4 = _mm_mullo_pi16(col1l, quant1l); \
+ tmp5 = _mm_mullo_pi16(col3l, quant3l); \
+ tmp6 = _mm_mullo_pi16(col5l, quant5l); \
+ tmp7 = _mm_mullo_pi16(col7l, quant7l); \
+ \
+ z13 = _mm_add_pi16(tmp6, tmp5); \
+ z10 = _mm_sub_pi16(tmp6, tmp5); \
+ z11 = _mm_add_pi16(tmp4, tmp7); \
+ z12 = _mm_sub_pi16(tmp4, tmp7); \
+ \
+ DO_IDCT_COMMON() \
+ \
+ /* out0=(00 10 20 30), out1=(01 11 21 31) */ \
+ /* out2=(02 12 22 32), out3=(03 13 23 33) */ \
+ /* out4=(04 14 24 34), out5=(05 15 25 35) */ \
+ /* out6=(06 16 26 36), out7=(07 17 27 37) */ \
+ \
+ /* Transpose coefficients */ \
+ \
+ row01a = _mm_unpacklo_pi16(out0, out1); /* row01a=(00 01 10 11) */ \
+ row23a = _mm_unpackhi_pi16(out0, out1); /* row23a=(20 21 30 31) */ \
+ row01d = _mm_unpacklo_pi16(out6, out7); /* row01d=(06 07 16 17) */ \
+ row23d = _mm_unpackhi_pi16(out6, out7); /* row23d=(26 27 36 37) */ \
+ \
+ row01b = _mm_unpacklo_pi16(out2, out3); /* row01b=(02 03 12 13) */ \
+ row23b = _mm_unpackhi_pi16(out2, out3); /* row23b=(22 23 32 33) */ \
+ row01c = _mm_unpacklo_pi16(out4, out5); /* row01c=(04 05 14 15) */ \
+ row23c = _mm_unpackhi_pi16(out4, out5); /* row23c=(24 25 34 35) */ \
+ \
+ row0l = _mm_unpacklo_pi32(row01a, row01b); /* row0l=(00 01 02 03) */ \
+ row1l = _mm_unpackhi_pi32(row01a, row01b); /* row1l=(10 11 12 13) */ \
+ row2l = _mm_unpacklo_pi32(row23a, row23b); /* row2l=(20 21 22 23) */ \
+ row3l = _mm_unpackhi_pi32(row23a, row23b); /* row3l=(30 31 32 33) */ \
+ \
+ row0h = _mm_unpacklo_pi32(row01c, row01d); /* row0h=(04 05 06 07) */ \
+ row1h = _mm_unpackhi_pi32(row01c, row01d); /* row1h=(14 15 16 17) */ \
+ row2h = _mm_unpacklo_pi32(row23c, row23d); /* row2h=(24 25 26 27) */ \
+ row3h = _mm_unpackhi_pi32(row23c, row23d); /* row3h=(34 35 36 37) */ \
+ \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0h); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1h); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2h); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3h); \
+}
+
+#define DO_IDCT_PASS2(ctr) { \
+ __m64 row0l, row1l, row2l, row3l, row4l, row5l, row6l, row7l; \
+ __m64 col0123a, col0123b, col0123c, col0123d; \
+ __m64 col01l, col01h, col23l, col23h; \
+ __m64 col0, col1, col2, col3; \
+ __m64 row06, row17, row24, row35; \
+ \
+ row0l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 0]); /* (00 01 02 03) */ \
+ row1l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 1]); /* (10 11 12 13) */ \
+ row2l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 2]); /* (20 21 22 23) */ \
+ row3l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 3]); /* (30 31 32 33) */ \
+ row4l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 4]); /* (40 41 42 43) */ \
+ row5l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 5]); /* (50 51 52 53) */ \
+ row6l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 6]); /* (60 61 62 63) */ \
+ row7l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 7]); /* (70 71 72 73) */ \
+ \
+ /* Even part */ \
+ \
+ tmp10 = _mm_add_pi16(row0l, row4l); \
+ tmp11 = _mm_sub_pi16(row0l, row4l); \
+ tmp13 = _mm_add_pi16(row2l, row6l); \
+ \
+ tmp12 = _mm_sub_pi16(row2l, row6l); \
+ tmp12 = _mm_slli_pi16(tmp12, PRE_MULTIPLY_SCALE_BITS); \
+ tmp12 = _mm_mulhi_pi16(tmp12, PW_F1414); \
+ tmp12 = _mm_sub_pi16(tmp12, tmp13); \
+ \
+ tmp0 = _mm_add_pi16(tmp10, tmp13); \
+ tmp3 = _mm_sub_pi16(tmp10, tmp13); \
+ tmp1 = _mm_add_pi16(tmp11, tmp12); \
+ tmp2 = _mm_sub_pi16(tmp11, tmp12); \
+ \
+ /* Odd part */ \
+ \
+ z13 = _mm_add_pi16(row5l, row3l); \
+ z10 = _mm_sub_pi16(row5l, row3l); \
+ z11 = _mm_add_pi16(row1l, row7l); \
+ z12 = _mm_sub_pi16(row1l, row7l); \
+ \
+ DO_IDCT_COMMON() \
+ \
+ /* out0=(00 01 02 03), out1=(10 11 12 13) */ \
+ /* out2=(20 21 22 23), out3=(30 31 32 33) */ \
+ /* out4=(40 41 42 43), out5=(50 51 52 53) */ \
+ /* out6=(60 61 62 63), out7=(70 71 72 73) */ \
+ \
+ out0 = _mm_srai_pi16(out0, PASS1_BITS + 3); \
+ out1 = _mm_srai_pi16(out1, PASS1_BITS + 3); \
+ out2 = _mm_srai_pi16(out2, PASS1_BITS + 3); \
+ out3 = _mm_srai_pi16(out3, PASS1_BITS + 3); \
+ out4 = _mm_srai_pi16(out4, PASS1_BITS + 3); \
+ out5 = _mm_srai_pi16(out5, PASS1_BITS + 3); \
+ out6 = _mm_srai_pi16(out6, PASS1_BITS + 3); \
+ out7 = _mm_srai_pi16(out7, PASS1_BITS + 3); \
+ \
+ row06 = _mm_packs_pi16(out0, out6); /* row06=(00 01 02 03 60 61 62 63) */ \
+ row17 = _mm_packs_pi16(out1, out7); /* row17=(10 11 12 13 70 71 72 73) */ \
+ row24 = _mm_packs_pi16(out2, out4); /* row24=(20 21 22 23 40 41 42 43) */ \
+ row35 = _mm_packs_pi16(out3, out5); /* row35=(30 31 32 33 50 51 52 53) */ \
+ \
+ row06 = _mm_add_pi8(row06, PB_CENTERJSAMP); \
+ row17 = _mm_add_pi8(row17, PB_CENTERJSAMP); \
+ row24 = _mm_add_pi8(row24, PB_CENTERJSAMP); \
+ row35 = _mm_add_pi8(row35, PB_CENTERJSAMP); \
+ \
+ /* Transpose coefficients */ \
+ \
+ col0123a = _mm_unpacklo_pi8(row06, row17); /* col0123a=(00 10 01 11 02 12 03 13) */ \
+ col0123d = _mm_unpackhi_pi8(row06, row17); /* col0123d=(60 70 61 71 62 72 63 73) */ \
+ col0123b = _mm_unpacklo_pi8(row24, row35); /* col0123b=(20 30 21 31 22 32 23 33) */ \
+ col0123c = _mm_unpackhi_pi8(row24, row35); /* col0123c=(40 50 41 51 42 52 43 53) */ \
+ \
+ col01l = _mm_unpacklo_pi16(col0123a, col0123b); /* col01l=(00 10 20 30 01 11 21 31) */ \
+ col23l = _mm_unpackhi_pi16(col0123a, col0123b); /* col23l=(02 12 22 32 03 13 23 33) */ \
+ col01h = _mm_unpacklo_pi16(col0123c, col0123d); /* col01h=(40 50 60 70 41 51 61 71) */ \
+ col23h = _mm_unpackhi_pi16(col0123c, col0123d); /* col23h=(42 52 62 72 43 53 63 73) */ \
+ \
+ col0 = _mm_unpacklo_pi32(col01l, col01h); /* col0=(00 10 20 30 40 50 60 70) */ \
+ col1 = _mm_unpackhi_pi32(col01l, col01h); /* col1=(01 11 21 31 41 51 61 71) */ \
+ col2 = _mm_unpacklo_pi32(col23l, col23h); /* col2=(02 12 22 32 42 52 62 72) */ \
+ col3 = _mm_unpackhi_pi32(col23l, col23h); /* col3=(03 13 23 33 43 53 63 73) */ \
+ \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 0] + output_col), col0); \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 1] + output_col), col1); \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 2] + output_col), col2); \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 3] + output_col), col3); \
+}
+
+void jsimd_idct_ifast_mmi(void *dct_table, JCOEFPTR coef_block,
+ JSAMPARRAY output_buf, JDIMENSION output_col)
+{
+ __m64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m64 tmp10, tmp11, tmp12, tmp13;
+ __m64 out0, out1, out2, out3, out4, out5, out6, out7;
+ __m64 z5, z10, z11, z12, z13;
+ JCOEFPTR inptr;
+  IFAST_MULT_TYPE *quantptr;
+ JCOEF *wsptr;
+ JCOEF workspace[DCTSIZE2]; /* buffers data between passes */
+
+ /* Pass 1: process columns. */
+
+ inptr = coef_block;
+  quantptr = (IFAST_MULT_TYPE *)dct_table;
+ wsptr = workspace;
+
+ DO_IDCT_PASS1(1)
+nextcolumn1:
+ inptr += 4;
+ quantptr += 4;
+ wsptr += DCTSIZE * 4;
+ DO_IDCT_PASS1(2)
+nextcolumn2:
+
+ /* Pass 2: process rows. */
+
+ wsptr = workspace;
+
+ DO_IDCT_PASS2(0)
+ wsptr += 4;
+ DO_IDCT_PASS2(4)
+}
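
The early-out at the top of DO_IDCT_PASS1() relies on the fact that a block whose AC coefficients are all zero inverse-transforms to a flat block, so the dequantized DC term can simply be broadcast. A scalar model of that shortcut (a sketch only, with a hypothetical helper name; it omits the per-pass descaling the real code applies):

    #define DCTSIZE 8

    /* Hypothetical scalar equivalent of the AC == 0 path above */
    static void dc_only_idct(const short *coef, const short *quant, short *out)
    {
      short dcval = (short)(coef[0] * quant[0]);  /* dequantized DC term */
      int i;

      for (i = 0; i < DCTSIZE * DCTSIZE; i++)
        out[i] = dcval;  /* a DC-only spectrum yields a constant block */
    }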
diff --git a/media/libjpeg/simd/mips64/jidctint-mmi.c b/media/libjpeg/simd/mips64/jidctint-mmi.c
new file mode 100644
index 0000000000..cd3db980c5
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jidctint-mmi.c
@@ -0,0 +1,571 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2014-2015, 2018, 2020, D. R. Commander. All Rights Reserved.
+ * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* ACCURATE INTEGER INVERSE DCT */
+
+#include "jsimd_mmi.h"
+
+
+#define CONST_BITS 13
+#define PASS1_BITS 2
+#define DESCALE_P1 (CONST_BITS - PASS1_BITS)
+#define DESCALE_P2 (CONST_BITS + PASS1_BITS + 3)
+#define CENTERJSAMPLE 128
+
+#define FIX_0_298 ((short)2446) /* FIX(0.298631336) */
+#define FIX_0_390 ((short)3196) /* FIX(0.390180644) */
+#define FIX_0_899 ((short)7373) /* FIX(0.899976223) */
+#define FIX_0_541 ((short)4433) /* FIX(0.541196100) */
+#define FIX_0_765 ((short)6270) /* FIX(0.765366865) */
+#define FIX_1_175 ((short)9633) /* FIX(1.175875602) */
+#define FIX_1_501 ((short)12299) /* FIX(1.501321110) */
+#define FIX_1_847 ((short)15137) /* FIX(1.847759065) */
+#define FIX_1_961 ((short)16069) /* FIX(1.961570560) */
+#define FIX_2_053 ((short)16819) /* FIX(2.053119869) */
+#define FIX_2_562 ((short)20995) /* FIX(2.562915447) */
+#define FIX_3_072 ((short)25172) /* FIX(3.072711026) */
+
+enum const_index {
+ index_PW_F130_F054,
+ index_PW_F054_MF130,
+ index_PW_MF078_F117,
+ index_PW_F117_F078,
+ index_PW_MF060_MF089,
+ index_PW_MF089_F060,
+ index_PW_MF050_MF256,
+ index_PW_MF256_F050,
+ index_PD_DESCALE_P1,
+ index_PD_DESCALE_P2,
+ index_PB_CENTERJSAMP
+};
+
+static uint64_t const_value[] = {
+ _uint64_set_pi16(FIX_0_541, (FIX_0_541 + FIX_0_765),
+ FIX_0_541, (FIX_0_541 + FIX_0_765)),
+ _uint64_set_pi16((FIX_0_541 - FIX_1_847), FIX_0_541,
+ (FIX_0_541 - FIX_1_847), FIX_0_541),
+ _uint64_set_pi16(FIX_1_175, (FIX_1_175 - FIX_1_961),
+ FIX_1_175, (FIX_1_175 - FIX_1_961)),
+ _uint64_set_pi16((FIX_1_175 - FIX_0_390), FIX_1_175,
+ (FIX_1_175 - FIX_0_390), FIX_1_175),
+ _uint64_set_pi16(-FIX_0_899, (FIX_0_298 - FIX_0_899),
+ -FIX_0_899, (FIX_0_298 - FIX_0_899)),
+ _uint64_set_pi16((FIX_1_501 - FIX_0_899), -FIX_0_899,
+ (FIX_1_501 - FIX_0_899), -FIX_0_899),
+ _uint64_set_pi16(-FIX_2_562, (FIX_2_053 - FIX_2_562),
+ -FIX_2_562, (FIX_2_053 - FIX_2_562)),
+ _uint64_set_pi16((FIX_3_072 - FIX_2_562), -FIX_2_562,
+ (FIX_3_072 - FIX_2_562), -FIX_2_562),
+ _uint64_set_pi32((1 << (DESCALE_P1 - 1)), (1 << (DESCALE_P1 - 1))),
+ _uint64_set_pi32((1 << (DESCALE_P2 - 1)), (1 << (DESCALE_P2 - 1))),
+ _uint64_set_pi8(CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE,
+ CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE, CENTERJSAMPLE)
+};
+
+#define PW_F130_F054 get_const_value(index_PW_F130_F054)
+#define PW_F054_MF130 get_const_value(index_PW_F054_MF130)
+#define PW_MF078_F117 get_const_value(index_PW_MF078_F117)
+#define PW_F117_F078 get_const_value(index_PW_F117_F078)
+#define PW_MF060_MF089 get_const_value(index_PW_MF060_MF089)
+#define PW_MF089_F060 get_const_value(index_PW_MF089_F060)
+#define PW_MF050_MF256 get_const_value(index_PW_MF050_MF256)
+#define PW_MF256_F050 get_const_value(index_PW_MF256_F050)
+#define PD_DESCALE_P1 get_const_value(index_PD_DESCALE_P1)
+#define PD_DESCALE_P2 get_const_value(index_PD_DESCALE_P2)
+#define PB_CENTERJSAMP get_const_value(index_PB_CENTERJSAMP)
+
+
+#define test_m32_zero(mm32) (!(*(uint32_t *)&mm32))
+#define test_m64_zero(mm64) (!(*(uint64_t *)&mm64))
+
+
+#define DO_IDCT_COMMON(PASS) { \
+ __m64 tmp0_3l, tmp0_3h, tmp1_2l, tmp1_2h; \
+ __m64 tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h; \
+ __m64 z34l, z34h, z3l, z3h, z4l, z4h, z3, z4; \
+ __m64 out0l, out0h, out1l, out1h, out2l, out2h, out3l, out3h; \
+ __m64 out4l, out4h, out5l, out5h, out6l, out6h, out7l, out7h; \
+ \
+ z3 = _mm_add_pi16(tmp0, tmp2); \
+ z4 = _mm_add_pi16(tmp1, tmp3); \
+ \
+ /* (Original) \
+ * z5 = (z3 + z4) * 1.175875602; \
+ * z3 = z3 * -1.961570560; z4 = z4 * -0.390180644; \
+ * z3 += z5; z4 += z5; \
+ * \
+ * (This implementation) \
+ * z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602; \
+ * z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644); \
+ */ \
+ \
+ z34l = _mm_unpacklo_pi16(z3, z4); \
+ z34h = _mm_unpackhi_pi16(z3, z4); \
+ z3l = _mm_madd_pi16(z34l, PW_MF078_F117); \
+ z3h = _mm_madd_pi16(z34h, PW_MF078_F117); \
+ z4l = _mm_madd_pi16(z34l, PW_F117_F078); \
+ z4h = _mm_madd_pi16(z34h, PW_F117_F078); \
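+  /* Sanity check: z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602 \
+   * expands to z3 * -1.961570560 + (z3 + z4) * 1.175875602, i.e. the \
+   * original z3 + z5. */ \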
+ \
+ /* (Original) \
+ * z1 = tmp0 + tmp3; z2 = tmp1 + tmp2; \
+ * tmp0 = tmp0 * 0.298631336; tmp1 = tmp1 * 2.053119869; \
+ * tmp2 = tmp2 * 3.072711026; tmp3 = tmp3 * 1.501321110; \
+ * z1 = z1 * -0.899976223; z2 = z2 * -2.562915447; \
+ * tmp0 += z1 + z3; tmp1 += z2 + z4; \
+ * tmp2 += z2 + z3; tmp3 += z1 + z4; \
+ * \
+ * (This implementation) \
+ * tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223; \
+ * tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447; \
+ * tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447); \
+ * tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223); \
+ * tmp0 += z3; tmp1 += z4; \
+ * tmp2 += z3; tmp3 += z4; \
+ */ \
+ \
+ tmp0_3l = _mm_unpacklo_pi16(tmp0, tmp3); \
+ tmp0_3h = _mm_unpackhi_pi16(tmp0, tmp3); \
+ \
+ tmp0l = _mm_madd_pi16(tmp0_3l, PW_MF060_MF089); \
+ tmp0h = _mm_madd_pi16(tmp0_3h, PW_MF060_MF089); \
+ tmp3l = _mm_madd_pi16(tmp0_3l, PW_MF089_F060); \
+ tmp3h = _mm_madd_pi16(tmp0_3h, PW_MF089_F060); \
+ \
+ tmp0l = _mm_add_pi32(tmp0l, z3l); \
+ tmp0h = _mm_add_pi32(tmp0h, z3h); \
+ tmp3l = _mm_add_pi32(tmp3l, z4l); \
+ tmp3h = _mm_add_pi32(tmp3h, z4h); \
+ \
+ tmp1_2l = _mm_unpacklo_pi16(tmp1, tmp2); \
+ tmp1_2h = _mm_unpackhi_pi16(tmp1, tmp2); \
+ \
+ tmp1l = _mm_madd_pi16(tmp1_2l, PW_MF050_MF256); \
+ tmp1h = _mm_madd_pi16(tmp1_2h, PW_MF050_MF256); \
+ tmp2l = _mm_madd_pi16(tmp1_2l, PW_MF256_F050); \
+ tmp2h = _mm_madd_pi16(tmp1_2h, PW_MF256_F050); \
+ \
+ tmp1l = _mm_add_pi32(tmp1l, z4l); \
+ tmp1h = _mm_add_pi32(tmp1h, z4h); \
+ tmp2l = _mm_add_pi32(tmp2l, z3l); \
+ tmp2h = _mm_add_pi32(tmp2h, z3h); \
+ \
+ /* Final output stage */ \
+ \
+ out0l = _mm_add_pi32(tmp10l, tmp3l); \
+ out0h = _mm_add_pi32(tmp10h, tmp3h); \
+ out7l = _mm_sub_pi32(tmp10l, tmp3l); \
+ out7h = _mm_sub_pi32(tmp10h, tmp3h); \
+ \
+ out0l = _mm_add_pi32(out0l, PD_DESCALE_P##PASS); \
+ out0h = _mm_add_pi32(out0h, PD_DESCALE_P##PASS); \
+ out0l = _mm_srai_pi32(out0l, DESCALE_P##PASS); \
+ out0h = _mm_srai_pi32(out0h, DESCALE_P##PASS); \
+ \
+ out7l = _mm_add_pi32(out7l, PD_DESCALE_P##PASS); \
+ out7h = _mm_add_pi32(out7h, PD_DESCALE_P##PASS); \
+ out7l = _mm_srai_pi32(out7l, DESCALE_P##PASS); \
+ out7h = _mm_srai_pi32(out7h, DESCALE_P##PASS); \
+ \
+ out0 = _mm_packs_pi32(out0l, out0h); \
+ out7 = _mm_packs_pi32(out7l, out7h); \
+ \
+ out1l = _mm_add_pi32(tmp11l, tmp2l); \
+ out1h = _mm_add_pi32(tmp11h, tmp2h); \
+ out6l = _mm_sub_pi32(tmp11l, tmp2l); \
+ out6h = _mm_sub_pi32(tmp11h, tmp2h); \
+ \
+ out1l = _mm_add_pi32(out1l, PD_DESCALE_P##PASS); \
+ out1h = _mm_add_pi32(out1h, PD_DESCALE_P##PASS); \
+ out1l = _mm_srai_pi32(out1l, DESCALE_P##PASS); \
+ out1h = _mm_srai_pi32(out1h, DESCALE_P##PASS); \
+ \
+ out6l = _mm_add_pi32(out6l, PD_DESCALE_P##PASS); \
+ out6h = _mm_add_pi32(out6h, PD_DESCALE_P##PASS); \
+ out6l = _mm_srai_pi32(out6l, DESCALE_P##PASS); \
+ out6h = _mm_srai_pi32(out6h, DESCALE_P##PASS); \
+ \
+ out1 = _mm_packs_pi32(out1l, out1h); \
+ out6 = _mm_packs_pi32(out6l, out6h); \
+ \
+ out2l = _mm_add_pi32(tmp12l, tmp1l); \
+ out2h = _mm_add_pi32(tmp12h, tmp1h); \
+ out5l = _mm_sub_pi32(tmp12l, tmp1l); \
+ out5h = _mm_sub_pi32(tmp12h, tmp1h); \
+ \
+ out2l = _mm_add_pi32(out2l, PD_DESCALE_P##PASS); \
+ out2h = _mm_add_pi32(out2h, PD_DESCALE_P##PASS); \
+ out2l = _mm_srai_pi32(out2l, DESCALE_P##PASS); \
+ out2h = _mm_srai_pi32(out2h, DESCALE_P##PASS); \
+ \
+ out5l = _mm_add_pi32(out5l, PD_DESCALE_P##PASS); \
+ out5h = _mm_add_pi32(out5h, PD_DESCALE_P##PASS); \
+ out5l = _mm_srai_pi32(out5l, DESCALE_P##PASS); \
+ out5h = _mm_srai_pi32(out5h, DESCALE_P##PASS); \
+ \
+ out2 = _mm_packs_pi32(out2l, out2h); \
+ out5 = _mm_packs_pi32(out5l, out5h); \
+ \
+ out3l = _mm_add_pi32(tmp13l, tmp0l); \
+ out3h = _mm_add_pi32(tmp13h, tmp0h); \
+ \
+ out4l = _mm_sub_pi32(tmp13l, tmp0l); \
+ out4h = _mm_sub_pi32(tmp13h, tmp0h); \
+ \
+ out3l = _mm_add_pi32(out3l, PD_DESCALE_P##PASS); \
+ out3h = _mm_add_pi32(out3h, PD_DESCALE_P##PASS); \
+ out3l = _mm_srai_pi32(out3l, DESCALE_P##PASS); \
+ out3h = _mm_srai_pi32(out3h, DESCALE_P##PASS); \
+ \
+ out4l = _mm_add_pi32(out4l, PD_DESCALE_P##PASS); \
+ out4h = _mm_add_pi32(out4h, PD_DESCALE_P##PASS); \
+ out4l = _mm_srai_pi32(out4l, DESCALE_P##PASS); \
+ out4h = _mm_srai_pi32(out4h, DESCALE_P##PASS); \
+ \
+ out3 = _mm_packs_pi32(out3l, out3h); \
+ out4 = _mm_packs_pi32(out4l, out4h); \
+}
+
+#define DO_IDCT_PASS1(iter) { \
+ __m64 col0l, col1l, col2l, col3l, col4l, col5l, col6l, col7l; \
+ __m64 quant0l, quant1l, quant2l, quant3l; \
+ __m64 quant4l, quant5l, quant6l, quant7l; \
+ __m64 z23, z2, z3, z23l, z23h; \
+ __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
+ __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
+ __m64 tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h; \
+ __m64 tmp10l, tmp10h, tmp11l, tmp11h, tmp12l, tmp12h, tmp13l, tmp13h; \
+ __m32 col0a, col1a, mm0; \
+ \
+ col0a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 1]); \
+ col1a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 2]); \
+ mm0 = _mm_or_si32(col0a, col1a); \
+ \
+ if (test_m32_zero(mm0)) { \
+ __m64 mm1, mm2; \
+ \
+ col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]); \
+ col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]); \
+ col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]); \
+ col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]); \
+ col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]); \
+ col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]); \
+ col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]); \
+ col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]); \
+ \
+ mm1 = _mm_or_si64(col1l, col3l); \
+ mm2 = _mm_or_si64(col2l, col4l); \
+ mm1 = _mm_or_si64(mm1, col5l); \
+ mm2 = _mm_or_si64(mm2, col6l); \
+ mm1 = _mm_or_si64(mm1, col7l); \
+ mm1 = _mm_or_si64(mm1, mm2); \
+ \
+ if (test_m64_zero(mm1)) { \
+ __m64 dcval, dcvall, dcvalh, row0, row1, row2, row3; \
+ \
+ /* AC terms all zero */ \
+ \
+ quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
+ \
+ dcval = _mm_mullo_pi16(col0l, quant0l); \
+ dcval = _mm_slli_pi16(dcval, PASS1_BITS); /* dcval=(00 10 20 30) */ \
+ \
+ dcvall = _mm_unpacklo_pi16(dcval, dcval); /* dcvall=(00 00 10 10) */ \
+ dcvalh = _mm_unpackhi_pi16(dcval, dcval); /* dcvalh=(20 20 30 30) */ \
+ \
+ row0 = _mm_unpacklo_pi32(dcvall, dcvall); /* row0=(00 00 00 00) */ \
+ row1 = _mm_unpackhi_pi32(dcvall, dcvall); /* row1=(10 10 10 10) */ \
+ row2 = _mm_unpacklo_pi32(dcvalh, dcvalh); /* row2=(20 20 20 20) */ \
+ row3 = _mm_unpackhi_pi32(dcvalh, dcvalh); /* row3=(30 30 30 30) */ \
+ \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3); \
+ \
+ goto nextcolumn##iter; \
+ } \
+ } \
+ \
+ /* Even part \
+ * \
+ * (Original) \
+ * z1 = (z2 + z3) * 0.541196100; \
+ * tmp2 = z1 + z3 * -1.847759065; \
+ * tmp3 = z1 + z2 * 0.765366865; \
+ * \
+ * (This implementation) \
+ * tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065); \
+ * tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 0.541196100; \
+ */ \
+ \
+ col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]); /* (00 10 20 30) */ \
+ col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]); /* (02 12 22 32) */ \
+ col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]); /* (04 14 24 34) */ \
+ col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]); /* (06 16 26 36) */ \
+ \
+ quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
+ quant2l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 2]); \
+ quant4l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 4]); \
+ quant6l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 6]); \
+ \
+ z2 = _mm_mullo_pi16(col2l, quant2l); \
+ z3 = _mm_mullo_pi16(col6l, quant6l); \
+ \
+ z23l = _mm_unpacklo_pi16(z2, z3); \
+ z23h = _mm_unpackhi_pi16(z2, z3); \
+ tmp3l = _mm_madd_pi16(z23l, PW_F130_F054); \
+ tmp3h = _mm_madd_pi16(z23h, PW_F130_F054); \
+ tmp2l = _mm_madd_pi16(z23l, PW_F054_MF130); \
+ tmp2h = _mm_madd_pi16(z23h, PW_F054_MF130); \
+ \
+ z2 = _mm_mullo_pi16(col0l, quant0l); \
+ z3 = _mm_mullo_pi16(col4l, quant4l); \
+ \
+ z23 = _mm_add_pi16(z2, z3); \
+ tmp0l = _mm_loadlo_pi16_f(z23); \
+ tmp0h = _mm_loadhi_pi16_f(z23); \
+ tmp0l = _mm_srai_pi32(tmp0l, (16 - CONST_BITS)); \
+ tmp0h = _mm_srai_pi32(tmp0h, (16 - CONST_BITS)); \
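+  /* _mm_loadlo_pi16_f/_mm_loadhi_pi16_f widen each 16-bit value into the \
+   * upper half of a 32-bit lane; shifting right by 16 - CONST_BITS then \
+   * yields the sign-extended value scaled by 2^CONST_BITS, matching the \
+   * madd products above. */ \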
+ \
+ tmp10l = _mm_add_pi32(tmp0l, tmp3l); \
+ tmp10h = _mm_add_pi32(tmp0h, tmp3h); \
+ tmp13l = _mm_sub_pi32(tmp0l, tmp3l); \
+ tmp13h = _mm_sub_pi32(tmp0h, tmp3h); \
+ \
+ z23 = _mm_sub_pi16(z2, z3); \
+ tmp1l = _mm_loadlo_pi16_f(z23); \
+ tmp1h = _mm_loadhi_pi16_f(z23); \
+ tmp1l = _mm_srai_pi32(tmp1l, (16 - CONST_BITS)); \
+ tmp1h = _mm_srai_pi32(tmp1h, (16 - CONST_BITS)); \
+ \
+ tmp11l = _mm_add_pi32(tmp1l, tmp2l); \
+ tmp11h = _mm_add_pi32(tmp1h, tmp2h); \
+ tmp12l = _mm_sub_pi32(tmp1l, tmp2l); \
+ tmp12h = _mm_sub_pi32(tmp1h, tmp2h); \
+ \
+ /* Odd part */ \
+ \
+ col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]); /* (01 11 21 31) */ \
+ col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]); /* (03 13 23 33) */ \
+ col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]); /* (05 15 25 35) */ \
+ col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]); /* (07 17 27 37) */ \
+ \
+ quant1l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 1]); \
+ quant3l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 3]); \
+ quant5l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 5]); \
+ quant7l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 7]); \
+ \
+ tmp0 = _mm_mullo_pi16(col7l, quant7l); \
+ tmp1 = _mm_mullo_pi16(col5l, quant5l); \
+ tmp2 = _mm_mullo_pi16(col3l, quant3l); \
+ tmp3 = _mm_mullo_pi16(col1l, quant1l); \
+ \
+ DO_IDCT_COMMON(1) \
+ \
+ /* out0=(00 10 20 30), out1=(01 11 21 31) */ \
+ /* out2=(02 12 22 32), out3=(03 13 23 33) */ \
+ /* out4=(04 14 24 34), out5=(05 15 25 35) */ \
+ /* out6=(06 16 26 36), out7=(07 17 27 37) */ \
+ \
+ /* Transpose coefficients */ \
+ \
+ row01a = _mm_unpacklo_pi16(out0, out1); /* row01a=(00 01 10 11) */ \
+ row23a = _mm_unpackhi_pi16(out0, out1); /* row23a=(20 21 30 31) */ \
+ row01d = _mm_unpacklo_pi16(out6, out7); /* row01d=(06 07 16 17) */ \
+ row23d = _mm_unpackhi_pi16(out6, out7); /* row23d=(26 27 36 37) */ \
+ \
+ row01b = _mm_unpacklo_pi16(out2, out3); /* row01b=(02 03 12 13) */ \
+ row23b = _mm_unpackhi_pi16(out2, out3); /* row23b=(22 23 32 33) */ \
+ row01c = _mm_unpacklo_pi16(out4, out5); /* row01c=(04 05 14 15) */ \
+ row23c = _mm_unpackhi_pi16(out4, out5); /* row23c=(24 25 34 35) */ \
+ \
+ row0l = _mm_unpacklo_pi32(row01a, row01b); /* row0l=(00 01 02 03) */ \
+ row1l = _mm_unpackhi_pi32(row01a, row01b); /* row1l=(10 11 12 13) */ \
+ row2l = _mm_unpacklo_pi32(row23a, row23b); /* row2l=(20 21 22 23) */ \
+ row3l = _mm_unpackhi_pi32(row23a, row23b); /* row3l=(30 31 32 33) */ \
+ \
+ row0h = _mm_unpacklo_pi32(row01c, row01d); /* row0h=(04 05 06 07) */ \
+ row1h = _mm_unpackhi_pi32(row01c, row01d); /* row1h=(14 15 16 17) */ \
+ row2h = _mm_unpacklo_pi32(row23c, row23d); /* row2h=(24 25 26 27) */ \
+ row3h = _mm_unpackhi_pi32(row23c, row23d); /* row3h=(34 35 36 37) */ \
+ \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0h); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1h); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2h); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3l); \
+ _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3h); \
+}
+
+#define DO_IDCT_PASS2(ctr) { \
+ __m64 row0l, row1l, row2l, row3l, row4l, row5l, row6l, row7l; \
+ __m64 z23, z23l, z23h; \
+ __m64 col0123a, col0123b, col0123c, col0123d; \
+ __m64 col01l, col01h, col23l, col23h, row06, row17, row24, row35; \
+ __m64 col0, col1, col2, col3; \
+ __m64 tmp0l, tmp0h, tmp1l, tmp1h, tmp2l, tmp2h, tmp3l, tmp3h; \
+ __m64 tmp10l, tmp10h, tmp11l, tmp11h, tmp12l, tmp12h, tmp13l, tmp13h; \
+ \
+ row0l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 0]); /* (00 01 02 03) */ \
+ row1l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 1]); /* (10 11 12 13) */ \
+ row2l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 2]); /* (20 21 22 23) */ \
+ row3l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 3]); /* (30 31 32 33) */ \
+ row4l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 4]); /* (40 41 42 43) */ \
+ row5l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 5]); /* (50 51 52 53) */ \
+ row6l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 6]); /* (60 61 62 63) */ \
+ row7l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 7]); /* (70 71 72 73) */ \
+ \
+ /* Even part \
+ * \
+ * (Original) \
+ * z1 = (z2 + z3) * 0.541196100; \
+ * tmp2 = z1 + z3 * -1.847759065; \
+ * tmp3 = z1 + z2 * 0.765366865; \
+ * \
+ * (This implementation) \
+ * tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065); \
+ * tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 0.541196100; \
+ */ \
+ \
+ z23l = _mm_unpacklo_pi16(row2l, row6l); \
+ z23h = _mm_unpackhi_pi16(row2l, row6l); \
+ \
+ tmp3l = _mm_madd_pi16(z23l, PW_F130_F054); \
+ tmp3h = _mm_madd_pi16(z23h, PW_F130_F054); \
+ tmp2l = _mm_madd_pi16(z23l, PW_F054_MF130); \
+ tmp2h = _mm_madd_pi16(z23h, PW_F054_MF130); \
+ \
+ z23 = _mm_add_pi16(row0l, row4l); \
+ tmp0l = _mm_loadlo_pi16_f(z23); \
+ tmp0h = _mm_loadhi_pi16_f(z23); \
+ tmp0l = _mm_srai_pi32(tmp0l, (16 - CONST_BITS)); \
+ tmp0h = _mm_srai_pi32(tmp0h, (16 - CONST_BITS)); \
+ \
+ tmp10l = _mm_add_pi32(tmp0l, tmp3l); \
+ tmp10h = _mm_add_pi32(tmp0h, tmp3h); \
+ tmp13l = _mm_sub_pi32(tmp0l, tmp3l); \
+ tmp13h = _mm_sub_pi32(tmp0h, tmp3h); \
+ \
+ z23 = _mm_sub_pi16(row0l, row4l); \
+ tmp1l = _mm_loadlo_pi16_f(z23); \
+ tmp1h = _mm_loadhi_pi16_f(z23); \
+ tmp1l = _mm_srai_pi32(tmp1l, (16 - CONST_BITS)); \
+ tmp1h = _mm_srai_pi32(tmp1h, (16 - CONST_BITS)); \
+ \
+ tmp11l = _mm_add_pi32(tmp1l, tmp2l); \
+ tmp11h = _mm_add_pi32(tmp1h, tmp2h); \
+ tmp12l = _mm_sub_pi32(tmp1l, tmp2l); \
+ tmp12h = _mm_sub_pi32(tmp1h, tmp2h); \
+ \
+ /* Odd part */ \
+ \
+ tmp0 = row7l; \
+ tmp1 = row5l; \
+ tmp2 = row3l; \
+ tmp3 = row1l; \
+ \
+ DO_IDCT_COMMON(2) \
+ \
+ /* out0=(00 01 02 03), out1=(10 11 12 13) */ \
+ /* out2=(20 21 22 23), out3=(30 31 32 33) */ \
+ /* out4=(40 41 42 43), out5=(50 51 52 53) */ \
+ /* out6=(60 61 62 63), out7=(70 71 72 73) */ \
+ \
+ row06 = _mm_packs_pi16(out0, out6); /* row06=(00 01 02 03 60 61 62 63) */ \
+ row17 = _mm_packs_pi16(out1, out7); /* row17=(10 11 12 13 70 71 72 73) */ \
+ row24 = _mm_packs_pi16(out2, out4); /* row24=(20 21 22 23 40 41 42 43) */ \
+ row35 = _mm_packs_pi16(out3, out5); /* row35=(30 31 32 33 50 51 52 53) */ \
+ \
+ row06 = _mm_add_pi8(row06, PB_CENTERJSAMP); \
+ row17 = _mm_add_pi8(row17, PB_CENTERJSAMP); \
+ row24 = _mm_add_pi8(row24, PB_CENTERJSAMP); \
+ row35 = _mm_add_pi8(row35, PB_CENTERJSAMP); \
+ \
+ /* Transpose coefficients */ \
+ \
+ col0123a = _mm_unpacklo_pi8(row06, row17); /* col0123a=(00 10 01 11 02 12 03 13) */ \
+ col0123d = _mm_unpackhi_pi8(row06, row17); /* col0123d=(60 70 61 71 62 72 63 73) */ \
+ col0123b = _mm_unpacklo_pi8(row24, row35); /* col0123b=(20 30 21 31 22 32 23 33) */ \
+ col0123c = _mm_unpackhi_pi8(row24, row35); /* col0123c=(40 50 41 51 42 52 43 53) */ \
+ \
+ col01l = _mm_unpacklo_pi16(col0123a, col0123b); /* col01l=(00 10 20 30 01 11 21 31) */ \
+ col23l = _mm_unpackhi_pi16(col0123a, col0123b); /* col23l=(02 12 22 32 03 13 23 33) */ \
+ col01h = _mm_unpacklo_pi16(col0123c, col0123d); /* col01h=(40 50 60 70 41 51 61 71) */ \
+ col23h = _mm_unpackhi_pi16(col0123c, col0123d); /* col23h=(42 52 62 72 43 53 63 73) */ \
+ \
+ col0 = _mm_unpacklo_pi32(col01l, col01h); /* col0=(00 10 20 30 40 50 60 70) */ \
+ col1 = _mm_unpackhi_pi32(col01l, col01h); /* col1=(01 11 21 31 41 51 61 71) */ \
+ col2 = _mm_unpacklo_pi32(col23l, col23h); /* col2=(02 12 22 32 42 52 62 72) */ \
+ col3 = _mm_unpackhi_pi32(col23l, col23h); /* col3=(03 13 23 33 43 53 63 73) */ \
+ \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 0] + output_col), col0); \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 1] + output_col), col1); \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 2] + output_col), col2); \
+ _mm_store_si64((__m64 *)(output_buf[ctr + 3] + output_col), col3); \
+}
+
+void jsimd_idct_islow_mmi(void *dct_table, JCOEFPTR coef_block,
+ JSAMPARRAY output_buf, JDIMENSION output_col)
+{
+ __m64 tmp0, tmp1, tmp2, tmp3;
+ __m64 out0, out1, out2, out3, out4, out5, out6, out7;
+ JCOEFPTR inptr;
+ ISLOW_MULT_TYPE *quantptr;
+ JCOEF *wsptr;
+ JCOEF workspace[DCTSIZE2]; /* buffers data between passes */
+
+ /* Pass 1: process columns. */
+
+ inptr = coef_block;
+ quantptr = (ISLOW_MULT_TYPE *)dct_table;
+ wsptr = workspace;
+
+ DO_IDCT_PASS1(1)
+nextcolumn1:
+ inptr += 4;
+ quantptr += 4;
+ wsptr += DCTSIZE * 4;
+ DO_IDCT_PASS1(2)
+nextcolumn2:
+
+ /* Pass 2: process rows. */
+
+ wsptr = workspace;
+
+ DO_IDCT_PASS2(0)
+ wsptr += 4;
+ DO_IDCT_PASS2(4)
+}
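
The short constants above are FIX()-style fixed-point values, round(x * 2^CONST_BITS) with CONST_BITS = 13, as defined in jdct.h. A quick standalone check of a few table entries:

    #include <stdio.h>

    #define CONST_BITS 13
    #define FIX(x)  ((int)((x) * (1 << CONST_BITS) + 0.5))

    int main(void)
    {
      printf("%d\n", FIX(0.541196100));  /* 4433  == FIX_0_541 */
      printf("%d\n", FIX(1.175875602));  /* 9633  == FIX_1_175 */
      printf("%d\n", FIX(3.072711026));  /* 25172 == FIX_3_072 */
      return 0;
    }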
diff --git a/media/libjpeg/simd/mips64/jquanti-mmi.c b/media/libjpeg/simd/mips64/jquanti-mmi.c
new file mode 100644
index 0000000000..339002fd80
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jquanti-mmi.c
@@ -0,0 +1,124 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2016-2017, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ * Copyright (C) 2018-2019, D. R. Commander. All Rights Reserved.
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/* INTEGER QUANTIZATION AND SAMPLE CONVERSION */
+
+#include "jsimd_mmi.h"
+
+
+#define DO_QUANT() { \
+ __m64 rowl, rowh, rowls, rowhs, rowlsave, rowhsave; \
+ __m64 corrl, corrh, recipl, reciph, scalel, scaleh; \
+ \
+ rowl = _mm_load_si64((__m64 *)&workspace[0]); \
+ rowh = _mm_load_si64((__m64 *)&workspace[4]); \
+ \
+ /* Branch-less absolute value */ \
+ rowls = _mm_srai_pi16(rowl, (WORD_BIT - 1)); /* -1 if value < 0, */ \
+ /* 0 otherwise */ \
+ rowhs = _mm_srai_pi16(rowh, (WORD_BIT - 1)); \
+ \
+ rowl = _mm_xor_si64(rowl, rowls); /* val = -val */ \
+ rowh = _mm_xor_si64(rowh, rowhs); \
+ rowl = _mm_sub_pi16(rowl, rowls); \
+ rowh = _mm_sub_pi16(rowh, rowhs); \
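+  /* Worked example for one lane holding -5: the mask is 0xFFFF, so \
+   * (-5 ^ 0xFFFF) - (-1) == 4 + 1 == 5; non-negative lanes get a zero \
+   * mask and pass through unchanged. */ \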
+ \
+ corrl = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 1]); /* correction */ \
+ corrh = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 1 + 4]); \
+ \
+ rowlsave = rowl = _mm_add_pi16(rowl, corrl); /* correction + roundfactor */ \
+ rowhsave = rowh = _mm_add_pi16(rowh, corrh); \
+ \
+ recipl = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 0]); /* reciprocal */ \
+ reciph = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 0 + 4]); \
+ \
+ rowl = _mm_mulhi_pi16(rowl, recipl); \
+ rowh = _mm_mulhi_pi16(rowh, reciph); \
+ \
+ /* reciprocal is always negative (MSB=1), so we always need to add the */ \
+ /* initial value (input value is never negative as we inverted it at the */ \
+ /* start of this routine) */ \
+ rowlsave = rowl = _mm_add_pi16(rowl, rowlsave); \
+ rowhsave = rowh = _mm_add_pi16(rowh, rowhsave); \
+ \
+ scalel = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 2]); /* scale */ \
+ scaleh = _mm_load_si64((__m64 *)&divisors[DCTSIZE2 * 2 + 4]); \
+ \
+ rowl = _mm_mulhi_pi16(rowl, scalel); \
+ rowh = _mm_mulhi_pi16(rowh, scaleh); \
+ \
+ /* determine if scale is negative */ \
+ scalel = _mm_srai_pi16(scalel, (WORD_BIT - 1)); \
+ scaleh = _mm_srai_pi16(scaleh, (WORD_BIT - 1)); \
+ \
+ /* and add input if it is */ \
+ scalel = _mm_and_si64(scalel, rowlsave); \
+ scaleh = _mm_and_si64(scaleh, rowhsave); \
+ rowl = _mm_add_pi16(rowl, scalel); \
+ rowh = _mm_add_pi16(rowh, scaleh); \
+ \
+ /* then check if negative input */ \
+ rowlsave = _mm_srai_pi16(rowlsave, (WORD_BIT - 1)); \
+ rowhsave = _mm_srai_pi16(rowhsave, (WORD_BIT - 1)); \
+ \
+ /* and add scale if it is */ \
+ rowlsave = _mm_and_si64(rowlsave, scalel); \
+ rowhsave = _mm_and_si64(rowhsave, scaleh); \
+ rowl = _mm_add_pi16(rowl, rowlsave); \
+ rowh = _mm_add_pi16(rowh, rowhsave); \
+ \
+ rowl = _mm_xor_si64(rowl, rowls); /* val = -val */ \
+ rowh = _mm_xor_si64(rowh, rowhs); \
+ rowl = _mm_sub_pi16(rowl, rowls); \
+ rowh = _mm_sub_pi16(rowh, rowhs); \
+ \
+ _mm_store_si64((__m64 *)&output_ptr[0], rowl); \
+ _mm_store_si64((__m64 *)&output_ptr[4], rowh); \
+ \
+ workspace += DCTSIZE; \
+ divisors += DCTSIZE; \
+ output_ptr += DCTSIZE; \
+}
+
+
+void jsimd_quantize_mmi(JCOEFPTR coef_block, DCTELEM *divisors,
+ DCTELEM *workspace)
+{
+ JCOEFPTR output_ptr = coef_block;
+
+ DO_QUANT()
+ DO_QUANT()
+ DO_QUANT()
+ DO_QUANT()
+ DO_QUANT()
+ DO_QUANT()
+ DO_QUANT()
+ DO_QUANT()
+}
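
DO_QUANT() divides by the quantization step with a 16-bit reciprocal multiply; the correction and scale words in divisors[] then adjust the rounding. A toy version of the core idea (not the library's exact algorithm):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      uint16_t q = 10;                              /* quantization step */
      uint16_t recip = (uint16_t)((1u << 16) / q);  /* 6553 */
      uint16_t x = 123;
      /* multiply by the reciprocal and keep the high 16 bits */
      uint16_t approx = (uint16_t)(((uint32_t)x * recip) >> 16);

      printf("%u / %u ~= %u\n", x, q, approx);      /* prints 12 */
      return 0;
    }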
diff --git a/media/libjpeg/simd/mips64/jsimd.c b/media/libjpeg/simd/mips64/jsimd.c
new file mode 100644
index 0000000000..917440b43b
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jsimd.c
@@ -0,0 +1,866 @@
+/*
+ * jsimd_mips64.c
+ *
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+ * Copyright (C) 2009-2011, 2014, 2016, 2018, 2022, D. R. Commander.
+ * Copyright (C) 2013-2014, MIPS Technologies, Inc., California.
+ * Copyright (C) 2015, 2018, 2022, Matthieu Darbois.
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ *
+ * Based on the x86 SIMD extension for IJG JPEG library,
+ * Copyright (C) 1999-2006, MIYASAKA Masaru.
+ * For conditions of distribution and use, see copyright notice in jsimdext.inc
+ *
+ * This file contains the interface between the "normal" portions
+ * of the library and the SIMD implementations when running on a
+ * 64-bit MIPS architecture.
+ */
+
+#define JPEG_INTERNALS
+#include "../../jinclude.h"
+#include "../../jpeglib.h"
+#include "../../jsimd.h"
+#include "../../jdct.h"
+#include "../../jsimddct.h"
+#include "../jsimd.h"
+
+#include <ctype.h>
+
+static THREAD_LOCAL unsigned int simd_support = ~0;
+
+#if defined(__linux__)
+
+#define SOMEWHAT_SANE_PROC_CPUINFO_SIZE_LIMIT (1024 * 1024)
+
+LOCAL(int)
+check_feature(char *buffer, char *feature)
+{
+ char *p;
+
+ if (*feature == 0)
+ return 0;
+ if (strncmp(buffer, "ASEs implemented", 16) != 0)
+ return 0;
+ buffer += 16;
+ while (isspace(*buffer))
+ buffer++;
+
+ /* Check if 'feature' is present in the buffer as a separate word */
+ while ((p = strstr(buffer, feature))) {
+ if (p > buffer && !isspace(*(p - 1))) {
+ buffer++;
+ continue;
+ }
+ p += strlen(feature);
+ if (*p != 0 && !isspace(*p)) {
+ buffer++;
+ continue;
+ }
+ return 1;
+ }
+ return 0;
+}
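+
+/* Example of a /proc/cpuinfo line this matches (contents vary by CPU):
+ *   ASEs implemented : loongson-mmi loongson-ext
+ */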
+
+LOCAL(int)
+parse_proc_cpuinfo(int bufsize)
+{
+ char *buffer = (char *)malloc(bufsize);
+ FILE *fd;
+
+ simd_support = 0;
+
+ if (!buffer)
+ return 0;
+
+ fd = fopen("/proc/cpuinfo", "r");
+ if (fd) {
+ while (fgets(buffer, bufsize, fd)) {
+ if (!strchr(buffer, '\n') && !feof(fd)) {
+ /* "impossible" happened - insufficient size of the buffer! */
+ fclose(fd);
+ free(buffer);
+ return 0;
+ }
+ if (check_feature(buffer, "loongson-mmi"))
+ simd_support |= JSIMD_MMI;
+ }
+ fclose(fd);
+ }
+ free(buffer);
+ return 1;
+}
+
+#endif
+
+/*
+ * Check what SIMD accelerations are supported.
+ */
+LOCAL(void)
+init_simd(void)
+{
+#ifndef NO_GETENV
+ char *env = NULL;
+#endif
+#if defined(__linux__)
+ int bufsize = 1024; /* an initial guess for the line buffer size limit */
+#endif
+
+ if (simd_support != ~0U)
+ return;
+
+ simd_support = 0;
+
+#if defined(__linux__)
+ while (!parse_proc_cpuinfo(bufsize)) {
+ bufsize *= 2;
+ if (bufsize > SOMEWHAT_SANE_PROC_CPUINFO_SIZE_LIMIT)
+ break;
+ }
+#elif defined(__mips_loongson_vector_rev)
+ /* Only enable MMI by default on non-Linux platforms when the compiler flags
+ * support it. */
+ simd_support |= JSIMD_MMI;
+#endif
+
+#ifndef NO_GETENV
+ /* Force different settings through environment variables */
+ env = getenv("JSIMD_FORCEMMI");
+ if ((env != NULL) && (strcmp(env, "1") == 0))
+ simd_support = JSIMD_MMI;
+ env = getenv("JSIMD_FORCENONE");
+ if ((env != NULL) && (strcmp(env, "1") == 0))
+ simd_support = 0;
+#endif
+}
+
+GLOBAL(int)
+jsimd_can_rgb_ycc(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+ if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4))
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_rgb_gray(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+ if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4))
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_ycc_rgb(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+ if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4))
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_ycc_rgb565(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_c_can_null_convert(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_rgb_ycc_convert(j_compress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPIMAGE output_buf, JDIMENSION output_row,
+ int num_rows)
+{
+ void (*mmifct) (JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int);
+
+ switch (cinfo->in_color_space) {
+ case JCS_EXT_RGB:
+ mmifct = jsimd_extrgb_ycc_convert_mmi;
+ break;
+ case JCS_EXT_RGBX:
+ case JCS_EXT_RGBA:
+ mmifct = jsimd_extrgbx_ycc_convert_mmi;
+ break;
+ case JCS_EXT_BGR:
+ mmifct = jsimd_extbgr_ycc_convert_mmi;
+ break;
+ case JCS_EXT_BGRX:
+ case JCS_EXT_BGRA:
+ mmifct = jsimd_extbgrx_ycc_convert_mmi;
+ break;
+ case JCS_EXT_XBGR:
+ case JCS_EXT_ABGR:
+ mmifct = jsimd_extxbgr_ycc_convert_mmi;
+ break;
+ case JCS_EXT_XRGB:
+ case JCS_EXT_ARGB:
+ mmifct = jsimd_extxrgb_ycc_convert_mmi;
+ break;
+ default:
+ mmifct = jsimd_rgb_ycc_convert_mmi;
+ break;
+ }
+
+ mmifct(cinfo->image_width, input_buf, output_buf, output_row, num_rows);
+}
+
+GLOBAL(void)
+jsimd_rgb_gray_convert(j_compress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPIMAGE output_buf, JDIMENSION output_row,
+ int num_rows)
+{
+ void (*mmifct) (JDIMENSION, JSAMPARRAY, JSAMPIMAGE, JDIMENSION, int);
+
+ switch (cinfo->in_color_space) {
+ case JCS_EXT_RGB:
+ mmifct = jsimd_extrgb_gray_convert_mmi;
+ break;
+ case JCS_EXT_RGBX:
+ case JCS_EXT_RGBA:
+ mmifct = jsimd_extrgbx_gray_convert_mmi;
+ break;
+ case JCS_EXT_BGR:
+ mmifct = jsimd_extbgr_gray_convert_mmi;
+ break;
+ case JCS_EXT_BGRX:
+ case JCS_EXT_BGRA:
+ mmifct = jsimd_extbgrx_gray_convert_mmi;
+ break;
+ case JCS_EXT_XBGR:
+ case JCS_EXT_ABGR:
+ mmifct = jsimd_extxbgr_gray_convert_mmi;
+ break;
+ case JCS_EXT_XRGB:
+ case JCS_EXT_ARGB:
+ mmifct = jsimd_extxrgb_gray_convert_mmi;
+ break;
+ default:
+ mmifct = jsimd_rgb_gray_convert_mmi;
+ break;
+ }
+
+ mmifct(cinfo->image_width, input_buf, output_buf, output_row, num_rows);
+}
+
+GLOBAL(void)
+jsimd_ycc_rgb_convert(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows)
+{
+ void (*mmifct) (JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY, int);
+
+ switch (cinfo->out_color_space) {
+ case JCS_EXT_RGB:
+ mmifct = jsimd_ycc_extrgb_convert_mmi;
+ break;
+ case JCS_EXT_RGBX:
+ case JCS_EXT_RGBA:
+ mmifct = jsimd_ycc_extrgbx_convert_mmi;
+ break;
+ case JCS_EXT_BGR:
+ mmifct = jsimd_ycc_extbgr_convert_mmi;
+ break;
+ case JCS_EXT_BGRX:
+ case JCS_EXT_BGRA:
+ mmifct = jsimd_ycc_extbgrx_convert_mmi;
+ break;
+ case JCS_EXT_XBGR:
+ case JCS_EXT_ABGR:
+ mmifct = jsimd_ycc_extxbgr_convert_mmi;
+ break;
+ case JCS_EXT_XRGB:
+ case JCS_EXT_ARGB:
+ mmifct = jsimd_ycc_extxrgb_convert_mmi;
+ break;
+ default:
+ mmifct = jsimd_ycc_rgb_convert_mmi;
+ break;
+ }
+
+ mmifct(cinfo->output_width, input_buf, input_row, output_buf, num_rows);
+}
+
+GLOBAL(void)
+jsimd_ycc_rgb565_convert(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows)
+{
+}
+
+GLOBAL(void)
+jsimd_c_null_convert(j_compress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPIMAGE output_buf, JDIMENSION output_row,
+ int num_rows)
+{
+}
+
+GLOBAL(int)
+jsimd_can_h2v2_downsample(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_h2v2_smooth_downsample(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_h2v1_downsample(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_h2v2_downsample(j_compress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY output_data)
+{
+ jsimd_h2v2_downsample_mmi(cinfo->image_width, cinfo->max_v_samp_factor,
+ compptr->v_samp_factor, compptr->width_in_blocks,
+ input_data, output_data);
+}
+
+GLOBAL(void)
+jsimd_h2v2_smooth_downsample(j_compress_ptr cinfo,
+ jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY output_data)
+{
+}
+
+GLOBAL(void)
+jsimd_h2v1_downsample(j_compress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY output_data)
+{
+}
+
+GLOBAL(int)
+jsimd_can_h2v2_upsample(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_h2v1_upsample(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_int_upsample(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_h2v2_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
+{
+}
+
+GLOBAL(void)
+jsimd_h2v1_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
+{
+}
+
+GLOBAL(void)
+jsimd_int_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
+{
+}
+
+GLOBAL(int)
+jsimd_can_h2v2_fancy_upsample(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_h2v1_fancy_upsample(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_h2v2_fancy_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
+{
+ jsimd_h2v2_fancy_upsample_mmi(cinfo->max_v_samp_factor,
+ compptr->downsampled_width, input_data,
+ output_data_ptr);
+}
+
+GLOBAL(void)
+jsimd_h2v1_fancy_upsample(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr)
+{
+ jsimd_h2v1_fancy_upsample_mmi(cinfo->max_v_samp_factor,
+ compptr->downsampled_width, input_data,
+ output_data_ptr);
+}
+
+GLOBAL(int)
+jsimd_can_h2v2_merged_upsample(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_h2v1_merged_upsample(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_h2v2_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf)
+{
+ void (*mmifct) (JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY);
+
+ switch (cinfo->out_color_space) {
+ case JCS_EXT_RGB:
+ mmifct = jsimd_h2v2_extrgb_merged_upsample_mmi;
+ break;
+ case JCS_EXT_RGBX:
+ case JCS_EXT_RGBA:
+ mmifct = jsimd_h2v2_extrgbx_merged_upsample_mmi;
+ break;
+ case JCS_EXT_BGR:
+ mmifct = jsimd_h2v2_extbgr_merged_upsample_mmi;
+ break;
+ case JCS_EXT_BGRX:
+ case JCS_EXT_BGRA:
+ mmifct = jsimd_h2v2_extbgrx_merged_upsample_mmi;
+ break;
+ case JCS_EXT_XBGR:
+ case JCS_EXT_ABGR:
+ mmifct = jsimd_h2v2_extxbgr_merged_upsample_mmi;
+ break;
+ case JCS_EXT_XRGB:
+ case JCS_EXT_ARGB:
+ mmifct = jsimd_h2v2_extxrgb_merged_upsample_mmi;
+ break;
+ default:
+ mmifct = jsimd_h2v2_merged_upsample_mmi;
+ break;
+ }
+
+ mmifct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf);
+}
+
+GLOBAL(void)
+jsimd_h2v1_merged_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf)
+{
+ void (*mmifct) (JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY);
+
+ switch (cinfo->out_color_space) {
+ case JCS_EXT_RGB:
+ mmifct = jsimd_h2v1_extrgb_merged_upsample_mmi;
+ break;
+ case JCS_EXT_RGBX:
+ case JCS_EXT_RGBA:
+ mmifct = jsimd_h2v1_extrgbx_merged_upsample_mmi;
+ break;
+ case JCS_EXT_BGR:
+ mmifct = jsimd_h2v1_extbgr_merged_upsample_mmi;
+ break;
+ case JCS_EXT_BGRX:
+ case JCS_EXT_BGRA:
+ mmifct = jsimd_h2v1_extbgrx_merged_upsample_mmi;
+ break;
+ case JCS_EXT_XBGR:
+ case JCS_EXT_ABGR:
+ mmifct = jsimd_h2v1_extxbgr_merged_upsample_mmi;
+ break;
+ case JCS_EXT_XRGB:
+ case JCS_EXT_ARGB:
+ mmifct = jsimd_h2v1_extxrgb_merged_upsample_mmi;
+ break;
+ default:
+ mmifct = jsimd_h2v1_merged_upsample_mmi;
+ break;
+ }
+
+ mmifct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf);
+}
+
+GLOBAL(int)
+jsimd_can_convsamp(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_convsamp_float(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_convsamp(JSAMPARRAY sample_data, JDIMENSION start_col,
+ DCTELEM *workspace)
+{
+}
+
+GLOBAL(void)
+jsimd_convsamp_float(JSAMPARRAY sample_data, JDIMENSION start_col,
+ FAST_FLOAT *workspace)
+{
+}
+
+GLOBAL(int)
+jsimd_can_fdct_islow(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (DCTSIZE != 8)
+ return 0;
+ if (sizeof(DCTELEM) != 2)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_fdct_ifast(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (DCTSIZE != 8)
+ return 0;
+ if (sizeof(DCTELEM) != 2)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_fdct_float(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_fdct_islow(DCTELEM *data)
+{
+ jsimd_fdct_islow_mmi(data);
+}
+
+GLOBAL(void)
+jsimd_fdct_ifast(DCTELEM *data)
+{
+ jsimd_fdct_ifast_mmi(data);
+}
+
+GLOBAL(void)
+jsimd_fdct_float(FAST_FLOAT *data)
+{
+}
+
+GLOBAL(int)
+jsimd_can_quantize(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (DCTSIZE != 8)
+ return 0;
+ if (sizeof(JCOEF) != 2)
+ return 0;
+ if (sizeof(DCTELEM) != 2)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_quantize_float(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_quantize(JCOEFPTR coef_block, DCTELEM *divisors, DCTELEM *workspace)
+{
+ jsimd_quantize_mmi(coef_block, divisors, workspace);
+}
+
+GLOBAL(void)
+jsimd_quantize_float(JCOEFPTR coef_block, FAST_FLOAT *divisors,
+ FAST_FLOAT *workspace)
+{
+}
+
+GLOBAL(int)
+jsimd_can_idct_2x2(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_idct_4x4(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_idct_6x6(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_idct_12x12(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_idct_2x2(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+}
+
+GLOBAL(void)
+jsimd_idct_4x4(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+}
+
+GLOBAL(void)
+jsimd_idct_6x6(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+}
+
+GLOBAL(void)
+jsimd_idct_12x12(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+}
+
+GLOBAL(int)
+jsimd_can_idct_islow(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (DCTSIZE != 8)
+ return 0;
+ if (sizeof(JCOEF) != 2)
+ return 0;
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+ if (sizeof(ISLOW_MULT_TYPE) != 2)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_idct_ifast(void)
+{
+ init_simd();
+
+ /* The code is optimised for these values only */
+ if (DCTSIZE != 8)
+ return 0;
+ if (sizeof(JCOEF) != 2)
+ return 0;
+ if (BITS_IN_JSAMPLE != 8)
+ return 0;
+ if (sizeof(JDIMENSION) != 4)
+ return 0;
+ if (sizeof(IFAST_MULT_TYPE) != 2)
+ return 0;
+ if (IFAST_SCALE_BITS != 2)
+ return 0;
+
+ if (simd_support & JSIMD_MMI)
+ return 1;
+
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_can_idct_float(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_idct_islow(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+ jsimd_idct_islow_mmi(compptr->dct_table, coef_block, output_buf, output_col);
+}
+
+GLOBAL(void)
+jsimd_idct_ifast(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+ jsimd_idct_ifast_mmi(compptr->dct_table, coef_block, output_buf, output_col);
+}
+
+GLOBAL(void)
+jsimd_idct_float(j_decompress_ptr cinfo, jpeg_component_info *compptr,
+ JCOEFPTR coef_block, JSAMPARRAY output_buf,
+ JDIMENSION output_col)
+{
+}
+
+GLOBAL(int)
+jsimd_can_huff_encode_one_block(void)
+{
+ return 0;
+}
+
+GLOBAL(JOCTET *)
+jsimd_huff_encode_one_block(void *state, JOCTET *buffer, JCOEFPTR block,
+ int last_dc_val, c_derived_tbl *dctbl,
+ c_derived_tbl *actbl)
+{
+ return NULL;
+}
+
+GLOBAL(int)
+jsimd_can_encode_mcu_AC_first_prepare(void)
+{
+ return 0;
+}
+
+GLOBAL(void)
+jsimd_encode_mcu_AC_first_prepare(const JCOEF *block,
+ const int *jpeg_natural_order_start, int Sl,
+ int Al, UJCOEF *values, size_t *zerobits)
+{
+}
+
+GLOBAL(int)
+jsimd_can_encode_mcu_AC_refine_prepare(void)
+{
+ return 0;
+}
+
+GLOBAL(int)
+jsimd_encode_mcu_AC_refine_prepare(const JCOEF *block,
+ const int *jpeg_natural_order_start, int Sl,
+ int Al, UJCOEF *absvalues, size_t *bits)
+{
+ return 0;
+}
diff --git a/media/libjpeg/simd/mips64/jsimd_mmi.h b/media/libjpeg/simd/mips64/jsimd_mmi.h
new file mode 100644
index 0000000000..5e4261c9d9
--- /dev/null
+++ b/media/libjpeg/simd/mips64/jsimd_mmi.h
@@ -0,0 +1,69 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Authors: ZhuChen <zhuchen@loongson.cn>
+ * CaiWanwei <caiwanwei@loongson.cn>
+ * SunZhangzhi <sunzhangzhi-cq@loongson.cn>
+ * QingfaLiu <liuqingfa-hf@loongson.cn>
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+#define JPEG_INTERNALS
+#include "../../jinclude.h"
+#include "../../jpeglib.h"
+#include "../../jdct.h"
+#include "loongson-mmintrin.h"
+
+
+/* Common code */
+#if defined(_ABI64) && _MIPS_SIM == _ABI64
+# define PTR_ADDU "daddu "
+# define PTR_SLL "dsll "
+#else
+# define PTR_ADDU "addu "
+# define PTR_SLL "sll "
+#endif
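+
+/* Usage sketch (illustrative, not part of the upstream code): the PTR_*
+   macros expand to the doubleword or word form of an instruction, so one
+   extended-asm template can step a sample pointer by
+   index * SIZEOF_MMWORD under either ABI.  The helper name is
+   hypothetical. */
+extern __inline JSAMPROW FUNCTION_ATTRIBS
+example_mmword_ptr(JSAMPROW base, int index)
+{
+  JSAMPROW out;
+
+  asm(PTR_SLL  "%0, %1, 3\n\t"     /* out = index << 3 (8 bytes per mmword) */
+      PTR_ADDU "%0, %2, %0\n\t"    /* out = base + out */
+      : "=&r" (out)
+      : "r" (index), "r" (base));
+  return out;
+}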
+
+#define SIZEOF_MMWORD 8
+#define BYTE_BIT 8
+#define WORD_BIT 16
+#define SCALEBITS 16
+
+#define _uint64_set_pi8(a, b, c, d, e, f, g, h) \
+ (((uint64_t)(uint8_t)a << 56) | \
+ ((uint64_t)(uint8_t)b << 48) | \
+ ((uint64_t)(uint8_t)c << 40) | \
+ ((uint64_t)(uint8_t)d << 32) | \
+ ((uint64_t)(uint8_t)e << 24) | \
+ ((uint64_t)(uint8_t)f << 16) | \
+ ((uint64_t)(uint8_t)g << 8) | \
+ ((uint64_t)(uint8_t)h))
+#define _uint64_set1_pi8(a) _uint64_set_pi8(a, a, a, a, a, a, a, a)
+#define _uint64_set_pi16(a, b, c, d) \
+ (((uint64_t)(uint16_t)a << 48) | \
+ ((uint64_t)(uint16_t)b << 32) | \
+ ((uint64_t)(uint16_t)c << 16) | \
+ ((uint64_t)(uint16_t)d))
+#define _uint64_set1_pi16(a) _uint64_set_pi16(a, a, a, a)
+#define _uint64_set_pi32(a, b) \
+ (((uint64_t)(uint32_t)a << 32) | \
+ ((uint64_t)(uint32_t)b))
+
+#define get_const_value(index) (*(__m64 *)&const_value[index])
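+
+/* Usage sketch (illustrative only): the MMI kernels build their constant
+   vectors at compile time with the _uint64_set*() macros and reinterpret
+   them as __m64 through get_const_value(), which expects a table named
+   const_value to be in scope, e.g.:
+
+     static const uint64_t const_value[1] = {
+       _uint64_set1_pi16(128)            (four halfwords of the bias 128)
+     };
+     __m64 bias = get_const_value(0);
+*/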
diff --git a/media/libjpeg/simd/mips64/loongson-mmintrin.h b/media/libjpeg/simd/mips64/loongson-mmintrin.h
new file mode 100644
index 0000000000..db9b35ab60
--- /dev/null
+++ b/media/libjpeg/simd/mips64/loongson-mmintrin.h
@@ -0,0 +1,1334 @@
+/*
+ * Loongson MMI optimizations for libjpeg-turbo
+ *
+ * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
+ * All Rights Reserved.
+ * Copyright (C) 2019, D. R. Commander. All Rights Reserved.
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+#ifndef __LOONGSON_MMINTRIN_H__
+#define __LOONGSON_MMINTRIN_H__
+
+#include <stdint.h>
+
+
+#define FUNCTION_ATTRIBS \
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+
+
+/* Vectors are stored in 64-bit floating-point registers. */
+typedef double __m64;
+
+/* Having a 32-bit datatype allows us to use 32-bit loads in places like
+ load8888. */
+typedef float __m32;
+
+
+/********** Set Operations **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_setzero_si64(void)
+{
+ return 0.0;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_set_pi8(uint8_t __b7, uint8_t __b6, uint8_t __b5, uint8_t __b4,
+ uint8_t __b3, uint8_t __b2, uint8_t __b1, uint8_t __b0)
+{
+ __m64 ret;
+ uint32_t lo = ((uint32_t)__b6 << 24) |
+ ((uint32_t)__b4 << 16) |
+ ((uint32_t)__b2 << 8) |
+ (uint32_t)__b0;
+ uint32_t hi = ((uint32_t)__b7 << 24) |
+ ((uint32_t)__b5 << 16) |
+ ((uint32_t)__b3 << 8) |
+ (uint32_t)__b1;
+
+ asm("mtc1 %1, %0\n\t"
+ "mtc1 %2, $f0\n\t"
+ "punpcklbh %0, %0, $f0\n\t"
+ : "=f" (ret)
+ : "r" (lo), "r" (hi)
+ : "$f0"
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_set_pi16(uint16_t __h3, uint16_t __h2, uint16_t __h1, uint16_t __h0)
+{
+ __m64 ret;
+ uint32_t lo = ((uint32_t)__h2 << 16) | (uint32_t)__h0;
+ uint32_t hi = ((uint32_t)__h3 << 16) | (uint32_t)__h1;
+
+ asm("mtc1 %1, %0\n\t"
+ "mtc1 %2, $f0\n\t"
+ "punpcklhw %0, %0, $f0\n\t"
+ : "=f" (ret)
+ : "r" (lo), "r" (hi)
+ : "$f0"
+ );
+
+ return ret;
+}
+
+#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
+ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
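+
+/* Example (illustrative): _MM_SHUFFLE(3, 3, 3, 3) == 0xFF: every
+   destination halfword selects source halfword 3, the selector
+   _mm_expand_alpha() uses at the end of this header. */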
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_set_pi32(uint32_t __i1, uint32_t __i0)
+{
+ if (__builtin_constant_p(__i1) && __builtin_constant_p(__i0)) {
+ uint64_t val = ((uint64_t)__i1 << 32) |
+ ((uint64_t)__i0 << 0);
+
+ return *(__m64 *)&val;
+ } else if (__i1 == __i0) {
+ uint64_t imm = _MM_SHUFFLE(1, 0, 1, 0);
+ __m64 ret;
+
+ asm("pshufh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (*(__m32 *)&__i1), "f" (*(__m64 *)&imm)
+ );
+
+ return ret;
+ } else {
+ uint64_t val = ((uint64_t)__i1 << 32) |
+ ((uint64_t)__i0 << 0);
+
+ return *(__m64 *)&val;
+ }
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_set1_pi8(uint8_t __b0)
+{
+ __m64 ret;
+
+ asm("sll $8, %1, 8\n\t"
+ "or %1, %1, $8\n\t"
+ "mtc1 %1, %0\n\t"
+ "mtc1 $0, $f0\n\t"
+ "pshufh %0, %0, $f0\n\t"
+ : "=f" (ret)
+ : "r" (__b0)
+ : "$8", "$f0"
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_set1_pi16(uint16_t __h0)
+{
+ __m64 ret;
+
+ asm("mtc1 %1, %0\n\t"
+ "mtc1 $0, $f0\n\t"
+ "pshufh %0, %0, $f0\n\t"
+ : "=f" (ret)
+ : "r" (__h0)
+      : "$f0"
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_set1_pi32(uint32_t __i0)
+{
+ return _mm_set_pi32(__i0, __i0);
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_setr_pi8(uint8_t __b0, uint8_t __b1, uint8_t __b2, uint8_t __b3,
+             uint8_t __b4, uint8_t __b5, uint8_t __b6, uint8_t __b7)
+{
+  return _mm_set_pi8(__b7, __b6, __b5, __b4,
+                     __b3, __b2, __b1, __b0);
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_setr_pi16(uint16_t __w0, uint16_t __w1, uint16_t __w2, uint16_t __w3)
+{
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_setr_pi32(uint32_t __i0, uint32_t __i1)
+{
+ return _mm_set_pi32(__i1, __i0);
+}
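+
+/* Usage sketch (illustrative only): the _mm_set*() forms list lanes from
+   high to low and the _mm_setr*() forms from low to high, so the two calls
+   below build the same vector.  The helper name is hypothetical. */
+extern __inline __m64 FUNCTION_ATTRIBS
+example_lane_order(void)
+{
+  __m64 a = _mm_set_pi16(4, 3, 2, 1);    /* halfword 3 = 4, ..., halfword 0 = 1 */
+  __m64 b = _mm_setr_pi16(1, 2, 3, 4);   /* identical bit pattern */
+
+  (void)b;
+  return a;
+}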
+
+
+/********** Arithmetic Operations **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_add_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_add_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_add_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_add_si64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddd %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddsb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_adds_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddsh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddusb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("paddush %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
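+
+/* Usage sketch (illustrative only): the unsigned saturating forms clamp at
+   255 (or 65535) instead of wrapping, which is what sample arithmetic
+   needs.  The helper name is hypothetical. */
+extern __inline __m64 FUNCTION_ATTRIBS
+example_brighten_pu8(__m64 pixels)
+{
+  return _mm_adds_pu8(pixels, _mm_set1_pi8(16));  /* min(p + 16, 255) per byte */
+}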
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_avg_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pavgb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_avg_pu16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pavgh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_madd_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmaddhw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_max_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmaxsh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_max_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmaxub %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_min_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pminsh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_min_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pminub %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline int FUNCTION_ATTRIBS
+_mm_movemask_pi8(__m64 __m1)
+{
+ int ret;
+
+ asm("pmovmskb %0, %1\n\t"
+ : "=r" (ret)
+ : "y" (__m1)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmulhh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_mulhi_pu16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmulhuh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmullh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_mul_pu32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pmuluw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_sad_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psadbh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_asub_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pasubub %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_biadd_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("biadd %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_sub_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_sub_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_sub_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_sub_si64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubd %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_subs_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubsb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_subs_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubsh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_subs_pu8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubusb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_subs_pu16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("psubush %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+/********** Logical Operations **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_and_si64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("and %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_andnot_si64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("andn %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_or_si32(__m32 __m1, __m32 __m2)
+{
+ __m32 ret;
+
+ asm("or %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_or_si64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("or %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_xor_si64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("xor %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+/********** Shift Operations **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_slli_pi16(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("psllh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_slli_pi32(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("psllw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_slli_si64(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("dsll %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_srli_pi16(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("psrlh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_srli_pi32(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("psrlw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_srli_si64(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("dsrl %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_srai_pi16(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("psrah %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_srai_pi32(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("psraw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_srai_si64(__m64 __m, int64_t __count)
+{
+ __m64 ret;
+
+ asm("dsra %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__count)
+ );
+
+ return ret;
+}
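+
+/* Usage sketch (illustrative only): a fixed-point multiply-accumulate with
+   rounding and descale, the pattern the conversion kernels are built from.
+   pmaddhw forms two 32-bit dot products, half of the divisor is added for
+   rounding, and an arithmetic shift drops the 16 fraction bits.  The
+   helper name is hypothetical. */
+extern __inline __m64 FUNCTION_ATTRIBS
+example_descale(__m64 samples, __m64 coeffs)
+{
+  __m64 acc = _mm_madd_pi16(samples, coeffs);      /* two 32-bit dot products */
+
+  acc = _mm_add_pi32(acc, _mm_set1_pi32(1 << 15)); /* add 2^15 to round */
+  return _mm_srai_pi32(acc, 16);                   /* descale by 2^16 */
+}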
+
+
+/********** Conversion Intrinsics **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+to_m64(uint64_t x)
+{
+ return *(__m64 *)&x;
+}
+
+extern __inline uint64_t FUNCTION_ATTRIBS
+to_uint64(__m64 x)
+{
+ return *(uint64_t *)&x;
+}
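+
+/* Usage sketch (illustrative only): because __m64 is a double-typed
+   container, integer bit patterns move in and out by type punning. */
+extern __inline __m64 FUNCTION_ATTRIBS
+example_all_ones(void)
+{
+  return to_m64(0xFFFFFFFFFFFFFFFFULL);   /* eight 0xFF bytes */
+}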
+
+
+/********** Comparison Intrinsics **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpeqb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpeqh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpeqw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpgtb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpgth %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpgtw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmplt_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpltb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmplt_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmplth %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_cmplt_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("pcmpltw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
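+
+/* Usage sketch (illustrative only): comparisons produce all-ones or
+   all-zero lane masks, so a per-lane maximum can be built without
+   branches.  The helper name is hypothetical. */
+extern __inline __m64 FUNCTION_ATTRIBS
+example_max_pi32(__m64 a, __m64 b)
+{
+  __m64 mask = _mm_cmpgt_pi32(a, b);   /* all-ones where a > b */
+
+  /* b ^ ((a ^ b) & mask) picks a where the mask is set, b elsewhere */
+  return _mm_xor_si64(b, _mm_and_si64(_mm_xor_si64(a, b), mask));
+}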
+
+
+/********** Miscellaneous Operations **********/
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_packs_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("packsshb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_packs_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("packsswh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_packs_pi32_f(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("packsswh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_packs_pu16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("packushb %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
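+
+/* Note (illustrative): packushb saturates signed 16-bit lanes into the
+   unsigned 0..255 range, the final clamp when emitting 8-bit samples. */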
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_extract_pi16(__m64 __m, int64_t __pos)
+{
+ __m64 ret;
+
+ asm("pextrh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__pos)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_insert_pi16(__m64 __m1, __m64 __m2, int64_t __pos)
+{
+ __m64 ret;
+
+ switch (__pos) {
+ case 0:
+
+ asm("pinsrh_0 %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2), "i" (__pos)
+ );
+
+ break;
+
+ case 1:
+
+ asm("pinsrh_1 %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2), "i" (__pos)
+ );
+
+ break;
+ case 2:
+
+ asm("pinsrh_2 %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2), "i" (__pos)
+ );
+
+ break;
+
+ case 3:
+
+ asm("pinsrh_3 %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2), "i" (__pos)
+ );
+
+ break;
+ }
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_shuffle_pi16(__m64 __m, int64_t __n)
+{
+ __m64 ret;
+
+ asm("pshufh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m), "f" (*(__m64 *)&__n)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpckhbh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpackhi_pi8_f(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpckhbh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpckhhw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpackhi_pi16_f(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpckhhw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpckhwd %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklbh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+/* Since punpcklbh cares about the high 32-bits, we use the __m64 datatype,
+ which preserves the data. */
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi8_f64(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklbh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+/* Since punpcklbh doesn't care about the high 32-bits, we use the __m32
+   datatype, which allows load8888 to use 32-bit loads. */
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi8_f(__m32 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklbh %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
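+
+/* Usage sketch (illustrative only): interleaving with zero widens the low
+   four unsigned bytes into halfwords; the _mm_loadlo*()/_mm_loadhi*()
+   helpers below wrap this pattern.  The helper name is hypothetical. */
+extern __inline __m64 FUNCTION_ATTRIBS
+example_widen_lo_pu8(__m64 bytes)
+{
+  return _mm_unpacklo_pi8(bytes, _mm_setzero_si64());  /* b3..b0 -> halfwords */
+}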
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklhw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi16_f(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklhw %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklwd %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_unpacklo_pi32_f(__m64 __m1, __m64 __m2)
+{
+ __m64 ret;
+
+ asm("punpcklwd %0, %1, %2\n\t"
+ : "=f" (ret)
+ : "f" (__m1), "f" (__m2)
+ );
+
+ return ret;
+}
+
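+/* Note: _mm_store_pi32() packs the four unsigned 16-bit lanes to bytes and
+   then stores the low 32 bits, so it is a pack-and-store rather than a
+   plain 32-bit store. */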
+extern __inline void FUNCTION_ATTRIBS
+_mm_store_pi32(__m32 *dest, __m64 src)
+{
+ src = _mm_packs_pu16(src, _mm_setzero_si64());
+
+ asm("swc1 %1, %0\n\t"
+ : "=m" (*dest)
+ : "f" (src)
+ : "memory"
+ );
+}
+
+extern __inline void FUNCTION_ATTRIBS
+_mm_store_si64(__m64 *dest, __m64 src)
+{
+ asm("sdc1 %1, %0 \n\t"
+ : "=m" (*dest)
+ : "f" (src)
+ : "memory"
+ );
+}
+
+extern __inline void FUNCTION_ATTRIBS
+_mm_storeu_si64(__m64 *dest, __m64 src)
+{
+ asm("gssdlc1 %1, 7(%0) \n\t"
+ "gssdrc1 %1, 0(%0) \n\t"
+ :
+ : "r" (dest), "f" (src)
+ : "memory"
+ );
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_load_si32(const __m32 *src)
+{
+ __m32 ret;
+
+ asm("lwc1 %0, %1\n\t"
+ : "=f" (ret)
+ : "m" (*src)
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_load_si64(const __m64 *src)
+{
+ __m64 ret;
+
+ asm("ldc1 %0, %1\n\t"
+ : "=f" (ret)
+ : "m" (*src)
+ : "memory"
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadu_si64(const __m64 *src)
+{
+ __m64 ret;
+
+ asm("gsldlc1 %0, 7(%1)\n\t"
+ "gsldrc1 %0, 0(%1)\n\t"
+ : "=f" (ret)
+ : "r" (src)
+ : "memory"
+ );
+
+ return ret;
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadlo_pi8(const uint32_t *src)
+{
+ return _mm_unpacklo_pi8_f(*(__m32 *)src, _mm_setzero_si64());
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadlo_pi8_f(__m64 src)
+{
+ return _mm_unpacklo_pi8_f64(src, _mm_setzero_si64());
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadhi_pi8_f(__m64 src)
+{
+ return _mm_unpackhi_pi8_f(src, _mm_setzero_si64());
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadlo_pi16(__m64 src)
+{
+ return _mm_unpacklo_pi16(src, _mm_setzero_si64());
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadlo_pi16_f(__m64 src)
+{
+ return _mm_unpacklo_pi16_f(_mm_setzero_si64(), src);
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadhi_pi16(__m64 src)
+{
+ return _mm_unpackhi_pi16(src, _mm_setzero_si64());
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_loadhi_pi16_f(__m64 src)
+{
+ return _mm_unpackhi_pi16_f(_mm_setzero_si64(), src);
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_expand_alpha(__m64 pixel)
+{
+ return _mm_shuffle_pi16(pixel, _MM_SHUFFLE(3, 3, 3, 3));
+}
+
+extern __inline __m64 FUNCTION_ATTRIBS
+_mm_expand_alpha_rev(__m64 pixel)
+{
+ return _mm_shuffle_pi16(pixel, _MM_SHUFFLE(0, 0, 0, 0));
+}
+
+#endif /* __LOONGSON_MMINTRIN_H__ */