// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MIPS version of lossless functions
//
// Author(s): Djordje Pesut    ([email protected])
//            Jovan Zelincevic ([email protected])

#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"

#if defined(WEBP_USE_MIPS32)

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

static float FastSLog2Slow_MIPS32(uint32_t v) {
  assert(v >= LOG_LOOKUP_IDX_MAX);
  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
    uint32_t log_cnt, y, correction;
    const int c24 = 24;
    const float v_f = (float)v;
    uint32_t temp;

    // Xf < 256 = 2^8
    // log_cnt is the index of the leading one in the upper 24 bits
    __asm__ volatile(
      "clz    %[log_cnt], %[v]                    \n\t"
      "addiu  %[y],       %[zero],    1           \n\t"
      "subu   %[log_cnt], %[c24],     %[log_cnt]  \n\t"
      "sllv   %[y],       %[y],       %[log_cnt]  \n\t"
      "srlv   %[temp],    %[v],       %[log_cnt]  \n\t"
      : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y),
        [temp]"=r"(temp)
      : [c24]"r"(c24), [v]"r"(v)
    );

    // vf = (2^log_cnt) * Xf; where y = 2^log_cnt and Xf < 256
    // Xf = floor(Xf) * (1 + (v % y) / v)
    // log2(Xf) = log2(floor(Xf)) + log2(1 + (v % y) / v)
    // The correction factor: log(1 + d) ~ d for very small values of d, so
    // log2(1 + (v % y) / v) ~ LOG_2_RECIPROCAL * (v % y) / v
    // LOG_2_RECIPROCAL ~ 23/16

    // (v % y) = (v % 2^log_cnt) = v & (2^log_cnt - 1)
    correction = (23 * (v & (y - 1))) >> 4;
    return v_f * (kLog2Table[temp] + log_cnt) + correction;
  } else {
    return (float)(LOG_2_RECIPROCAL * v * log((double)v));
  }
}
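
// A plain-C sketch of the asm block above, which FastLog2Slow_MIPS32()
// below reuses (a sketch only; BitsLog2Floor() is assumed to be available
// from src/utils/utils.h):
//   const int log_cnt = BitsLog2Floor(v) - 7;  // leading one in upper 24 bits
//   const uint32_t y = 1u << log_cnt;          // y = 2^log_cnt
//   const uint32_t temp = v >> log_cnt;        // floor(Xf), in [128, 256)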

static float FastLog2Slow_MIPS32(uint32_t v) {
  assert(v >= LOG_LOOKUP_IDX_MAX);
  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
    uint32_t log_cnt, y;
    const int c24 = 24;
    double log_2;
    uint32_t temp;

    __asm__ volatile(
      "clz    %[log_cnt], %[v]                    \n\t"
      "addiu  %[y],       %[zero],    1           \n\t"
      "subu   %[log_cnt], %[c24],     %[log_cnt]  \n\t"
      "sllv   %[y],       %[y],       %[log_cnt]  \n\t"
      "srlv   %[temp],    %[v],       %[log_cnt]  \n\t"
      : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y),
        [temp]"=r"(temp)
      : [c24]"r"(c24), [v]"r"(v)
    );

    log_2 = kLog2Table[temp] + log_cnt;
    if (v >= APPROX_LOG_MAX) {
      // Since the division is still expensive, add this correction factor
      // only for large values of 'v'.
      const uint32_t correction = (23 * (v & (y - 1))) >> 4;
      log_2 += (double)correction / v;
    }
    return (float)log_2;
  } else {
    return (float)(LOG_2_RECIPROCAL * log((double)v));
  }
}

// C version of this function:
//   int i = 0;
//   int64_t cost = 0;
//   const uint32_t* pop = &population[4];
//   const uint32_t* LoopEnd = &population[length];
//   while (pop != LoopEnd) {
//     ++i;
//     cost += i * *pop;
//     cost += i * *(pop + 1);
//     pop += 2;
//   }
//   return (uint32_t)cost;
static uint32_t ExtraCost_MIPS32(const uint32_t* const population,
                                 int length) {
  int i, temp0, temp1;
  const uint32_t* pop = &population[4];
  const uint32_t* const LoopEnd = &population[length];

  __asm__ volatile(
    "mult   $zero,    $zero                   \n\t"
    "xor    %[i],     %[i],       %[i]        \n\t"
    "beq    %[pop],   %[LoopEnd], 2f          \n\t"
  "1:                                         \n\t"
    "lw     %[temp0], 0(%[pop])               \n\t"
    "lw     %[temp1], 4(%[pop])               \n\t"
    "addiu  %[i],     %[i],       1           \n\t"
    "addiu  %[pop],   %[pop],     8           \n\t"
    "madd   %[i],     %[temp0]                \n\t"
    "madd   %[i],     %[temp1]                \n\t"
    "bne    %[pop],   %[LoopEnd], 1b          \n\t"
  "2:                                         \n\t"
    "mfhi   %[temp0]                          \n\t"
    "mflo   %[temp1]                          \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [i]"=&r"(i), [pop]"+r"(pop)
    : [LoopEnd]"r"(LoopEnd)
    : "memory", "hi", "lo"
  );
  return (uint32_t)((int64_t)temp0 << 32 | temp1);
}

// C version of this function:
//   int i = 0;
//   int64_t cost = 0;
//   const uint32_t* pX = &X[4];
//   const uint32_t* pY = &Y[4];
//   const uint32_t* LoopEnd = &X[length];
//   while (pX != LoopEnd) {
//     const uint32_t xy0 = *pX + *pY;
//     const uint32_t xy1 = *(pX + 1) + *(pY + 1);
//     ++i;
//     cost += i * xy0;
//     cost += i * xy1;
//     pX += 2;
//     pY += 2;
//   }
//   return (uint32_t)cost;
static uint32_t ExtraCostCombined_MIPS32(const uint32_t* const X,
                                         const uint32_t* const Y, int length) {
  int i, temp0, temp1, temp2, temp3;
  const uint32_t* pX = &X[4];
  const uint32_t* pY = &Y[4];
  const uint32_t* const LoopEnd = &X[length];

  __asm__ volatile(
    "mult   $zero,    $zero                   \n\t"
    "xor    %[i],     %[i],       %[i]        \n\t"
    "beq    %[pX],    %[LoopEnd], 2f          \n\t"
  "1:                                         \n\t"
    "lw     %[temp0], 0(%[pX])                \n\t"
    "lw     %[temp1], 0(%[pY])                \n\t"
    "lw     %[temp2], 4(%[pX])                \n\t"
    "lw     %[temp3], 4(%[pY])                \n\t"
    "addiu  %[i],     %[i],       1           \n\t"
    "addu   %[temp0], %[temp0],   %[temp1]    \n\t"
    "addu   %[temp2], %[temp2],   %[temp3]    \n\t"
    "addiu  %[pX],    %[pX],      8           \n\t"
    "addiu  %[pY],    %[pY],      8           \n\t"
    "madd   %[i],     %[temp0]                \n\t"
    "madd   %[i],     %[temp2]                \n\t"
    "bne    %[pX],    %[LoopEnd], 1b          \n\t"
  "2:                                         \n\t"
    "mfhi   %[temp0]                          \n\t"
    "mflo   %[temp1]                          \n\t"
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
      [i]"=&r"(i), [pX]"+r"(pX), [pY]"+r"(pY)
    : [LoopEnd]"r"(LoopEnd)
    : "memory", "hi", "lo"
  );
  return (uint32_t)((int64_t)temp0 << 32 | temp1);
}

#define HUFFMAN_COST_PASS                                 \
  __asm__ volatile(                                       \
    "sll    %[temp1], %[temp0],    3            \n\t"     \
    "addiu  %[temp3], %[streak],   -3           \n\t"     \
    "addu   %[temp2], %[pstreaks], %[temp1]     \n\t"     \
    "blez   %[temp3], 1f                        \n\t"     \
    "srl    %[temp1], %[temp1],    1            \n\t"     \
    "addu   %[temp3], %[pcnts],    %[temp1]     \n\t"     \
    "lw     %[temp0], 4(%[temp2])               \n\t"     \
    "lw     %[temp1], 0(%[temp3])               \n\t"     \
    "addu   %[temp0], %[temp0],    %[streak]    \n\t"     \
    "addiu  %[temp1], %[temp1],    1            \n\t"     \
    "sw     %[temp0], 4(%[temp2])               \n\t"     \
    "sw     %[temp1], 0(%[temp3])               \n\t"     \
    "b      2f                                  \n\t"     \
  "1:                                           \n\t"     \
    "lw     %[temp0], 0(%[temp2])               \n\t"     \
    "addu   %[temp0], %[temp0],    %[streak]    \n\t"     \
    "sw     %[temp0], 0(%[temp2])               \n\t"     \
  "2:                                           \n\t"     \
    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),           \
      [temp3]"=&r"(temp3), [temp0]"+r"(temp0)             \
    : [pstreaks]"r"(pstreaks), [pcnts]"r"(pcnts),         \
      [streak]"r"(streak)                                 \
    : "memory"                                            \
  );
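
// In plain C, one HUFFMAN_COST_PASS performs the following update (a
// sketch; 'b' stands for the input bit temp0 = (*val_prev != 0)):
//   if (streak > 3) {
//     stats->streaks[b][1] += streak;
//     stats->counts[b] += 1;
//   } else {
//     stats->streaks[b][0] += streak;
//   }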

// Returns the various RLE counts
static WEBP_INLINE void GetEntropyUnrefinedHelper(
    uint32_t val, int i, uint32_t* const val_prev, int* const i_prev,
    VP8LBitEntropy* const bit_entropy, VP8LStreaks* const stats) {
  int* const pstreaks = &stats->streaks[0][0];
  int* const pcnts = &stats->counts[0];
  int temp0, temp1, temp2, temp3;
  const int streak = i - *i_prev;

  // Gather info for the bit entropy.
  if (*val_prev != 0) {
    bit_entropy->sum += (*val_prev) * streak;
    bit_entropy->nonzeros += streak;
    bit_entropy->nonzero_code = *i_prev;
    bit_entropy->entropy -= VP8LFastSLog2(*val_prev) * streak;
    if (bit_entropy->max_val < *val_prev) {
      bit_entropy->max_val = *val_prev;
    }
  }

  // Gather info for the Huffman cost.
  temp0 = (*val_prev != 0);
  HUFFMAN_COST_PASS

  *val_prev = val;
  *i_prev = i;
}

static void GetEntropyUnrefined_MIPS32(const uint32_t X[], int length,
                                       VP8LBitEntropy* const bit_entropy,
                                       VP8LStreaks* const stats) {
  int i;
  int i_prev = 0;
  uint32_t x_prev = X[0];

  memset(stats, 0, sizeof(*stats));
  VP8LBitEntropyInit(bit_entropy);

  for (i = 1; i < length; ++i) {
    const uint32_t x = X[i];
    if (x != x_prev) {
      GetEntropyUnrefinedHelper(x, i, &x_prev, &i_prev, bit_entropy, stats);
    }
  }
  GetEntropyUnrefinedHelper(0, i, &x_prev, &i_prev, bit_entropy, stats);

  bit_entropy->entropy += VP8LFastSLog2(bit_entropy->sum);
}

static void GetCombinedEntropyUnrefined_MIPS32(const uint32_t X[],
                                               const uint32_t Y[],
                                               int length,
                                               VP8LBitEntropy* const entropy,
                                               VP8LStreaks* const stats) {
  int i;
  int i_prev = 0;
  uint32_t xy_prev = X[0] + Y[0];

  memset(stats, 0, sizeof(*stats));
  VP8LBitEntropyInit(entropy);

  for (i = 1; i < length; ++i) {
    const uint32_t xy = X[i] + Y[i];
    if (xy != xy_prev) {
      GetEntropyUnrefinedHelper(xy, i, &xy_prev, &i_prev, entropy, stats);
    }
  }
  GetEntropyUnrefinedHelper(0, i, &xy_prev, &i_prev, entropy, stats);

  entropy->entropy += VP8LFastSLog2(entropy->sum);
}

#define ASM_START                                         \
  __asm__ volatile(                                       \
    ".set   push                                \n\t"     \
    ".set   at                                  \n\t"     \
    ".set   macro                               \n\t"     \
  "1:                                           \n\t"
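
// Note: ".set at" / ".set macro" are needed because the ulw/usw used by
// ADD_TO_OUT below are assembler macros that may expand to several
// instructions and use the $at register (hence "at" in the clobber lists).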

// P2 = P0 + P1
// A..D - offsets
// E - flag telling the macro whether the P1 pointer
//     should also be incremented
// The literal_ and successive histograms may be unaligned,
// so we must use ulw and usw.
#define ADD_TO_OUT(A, B, C, D, E, P0, P1, P2)             \
    "ulw    %[temp0], " #A "(%[" #P0 "])        \n\t"     \
    "ulw    %[temp1], " #B "(%[" #P0 "])        \n\t"     \
    "ulw    %[temp2], " #C "(%[" #P0 "])        \n\t"     \
    "ulw    %[temp3], " #D "(%[" #P0 "])        \n\t"     \
    "ulw    %[temp4], " #A "(%[" #P1 "])        \n\t"     \
    "ulw    %[temp5], " #B "(%[" #P1 "])        \n\t"     \
    "ulw    %[temp6], " #C "(%[" #P1 "])        \n\t"     \
    "ulw    %[temp7], " #D "(%[" #P1 "])        \n\t"     \
    "addu   %[temp4], %[temp4],   %[temp0]      \n\t"     \
    "addu   %[temp5], %[temp5],   %[temp1]      \n\t"     \
    "addu   %[temp6], %[temp6],   %[temp2]      \n\t"     \
    "addu   %[temp7], %[temp7],   %[temp3]      \n\t"     \
    "addiu  %[" #P0 "], %[" #P0 "], 16          \n\t"     \
    ".if " #E " == 1                            \n\t"     \
    "addiu  %[" #P1 "], %[" #P1 "], 16          \n\t"     \
    ".endif                                     \n\t"     \
    "usw    %[temp4], " #A "(%[" #P2 "])        \n\t"     \
    "usw    %[temp5], " #B "(%[" #P2 "])        \n\t"     \
    "usw    %[temp6], " #C "(%[" #P2 "])        \n\t"     \
    "usw    %[temp7], " #D "(%[" #P2 "])        \n\t"     \
    "addiu  %[" #P2 "], %[" #P2 "], 16          \n\t"     \
    "bne    %[" #P0 "], %[LoopEnd], 1b          \n\t"     \
    ".set   pop                                 \n\t"

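// In C terms, one ADD_TO_OUT(A, B, C, D, E, P0, P1, P2) iteration performs
// (a sketch, with byte offsets A..D = 0, 4, 8, 12 as used below):
//   P2[0..3] = P0[0..3] + P1[0..3];  // unaligned loads/stores (ulw/usw)
//   P0 += 4; P2 += 4; and P1 += 4 only when E == 1;
// and branches back to label "1:" until P0 reaches LoopEnd.
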
#define ASM_END_COMMON_0                                  \
    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),           \
      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),           \
      [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),           \
      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),           \
      [pa]"+r"(pa), [pout]"+r"(pout)

#define ASM_END_COMMON_1                                  \
    : [LoopEnd]"r"(LoopEnd)                               \
    : "memory", "at"                                      \
  );

#define ASM_END_0                                         \
    ASM_END_COMMON_0                                      \
    , [pb]"+r"(pb)                                        \
    ASM_END_COMMON_1

#define ASM_END_1                                         \
    ASM_END_COMMON_0                                      \
    ASM_END_COMMON_1

static void AddVector_MIPS32(const uint32_t* pa, const uint32_t* pb,
                             uint32_t* pout, int size) {
  uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
  const int end = (size / 4) * 4;
  const uint32_t* const LoopEnd = pa + end;
  int i;
  if (end > 0) {  // the asm loop is do-while: skip it entirely when empty
    ASM_START
    ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout)
    ASM_END_0
  }
  for (i = 0; i < size - end; ++i) pout[i] = pa[i] + pb[i];
}

static void AddVectorEq_MIPS32(const uint32_t* pa, uint32_t* pout, int size) {
  uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
  const int end = (size / 4) * 4;
  const uint32_t* const LoopEnd = pa + end;
  int i;
  if (end > 0) {  // the asm loop is do-while: skip it entirely when empty
    ASM_START
    ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout)
    ASM_END_1
  }
  for (i = 0; i < size - end; ++i) pout[i] += pa[i];
}

#undef ASM_END_1
#undef ASM_END_0
#undef ASM_END_COMMON_1
#undef ASM_END_COMMON_0
#undef ADD_TO_OUT
#undef ASM_START

//------------------------------------------------------------------------------
// Entry point

extern void VP8LEncDspInitMIPS32(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMIPS32(void) {
  VP8LFastSLog2Slow = FastSLog2Slow_MIPS32;
  VP8LFastLog2Slow = FastLog2Slow_MIPS32;
  VP8LExtraCost = ExtraCost_MIPS32;
  VP8LExtraCostCombined = ExtraCostCombined_MIPS32;
  VP8LGetEntropyUnrefined = GetEntropyUnrefined_MIPS32;
  VP8LGetCombinedEntropyUnrefined = GetCombinedEntropyUnrefined_MIPS32;
  VP8LAddVector = AddVector_MIPS32;
  VP8LAddVectorEq = AddVectorEq_MIPS32;
}

#else  // !WEBP_USE_MIPS32

WEBP_DSP_INIT_STUB(VP8LEncDspInitMIPS32)

#endif  // WEBP_USE_MIPS32