/* Copyright (C) 2007-2009 Xiph.Org Foundation
   Copyright (C) 2003-2008 Jean-Marc Valin
   Copyright (C) 2007-2008 CSIRO */
/**
   @file fixed_generic_mipsr1.h
   @brief MIPS-optimized fixed-point operations
*/
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CELT_FIXED_GENERIC_MIPSR1_H
#define CELT_FIXED_GENERIC_MIPSR1_H
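
/* MIPS32 DSP ASE overrides for the generic CELT fixed-point macros in
 * fixed_generic.h.  Each routine keeps the full-precision product in the
 * DSP accumulator $ac1 (MULT/MADD/MSUB) and extracts the shifted result
 * with EXTR.W or EXTR_R.W, avoiding an explicit 64-bit temporary in C.
 * The consecutive asm statements in each routine communicate implicitly
 * through $ac1 and must stay together in the order written.
 *
 * As a rough sketch (not part of the original file), the Q15
 * multiply-accumulate below is intended to behave like this portable C:
 *
 *     static inline int mult16_32_q15_add_ref(int a, int b, int c, int d)
 *     {
 *         return (int)(((long long)a * b + (long long)c * d) >> 15);
 *     }
 */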
#undef MULT16_32_Q15_ADD
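/* (a * b + c * d) >> 15: a*b starts the $ac1 accumulator, c*d is added
   with madd, and EXTR.W extracts the result shifted right by 15. */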
static inline int MULT16_32_Q15_ADD(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("madd $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (m): "i" (15));
    return m;
}

#undef MULT16_32_Q15_SUB
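/* (a * b - c * d) >> 15: same as above, but c*d is subtracted with msub. */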
static inline int MULT16_32_Q15_SUB(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("msub $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (m): "i" (15));
    return m;
}

#undef MULT16_16_Q15_ADD
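/* (a * b + c * d) >> 15 for 16-bit operands; the instruction sequence is
   identical to MULT16_32_Q15_ADD. */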
static inline int MULT16_16_Q15_ADD(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("madd $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (m): "i" (15));
    return m;
}

#undef MULT16_16_Q15_SUB
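/* (a * b - c * d) >> 15 for 16-bit operands; identical to
   MULT16_32_Q15_SUB. */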
static inline int MULT16_16_Q15_SUB(int a, int b, int c, int d) {
    int m;
    asm volatile("MULT $ac1, %0, %1" : : "r" ((int)a), "r" ((int)b));
    asm volatile("msub $ac1, %0, %1" : : "r" ((int)c), "r" ((int)d));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (m): "i" (15));
    return m;
}

#undef MULT16_32_Q16
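/* (a * b) >> 16, truncating: the 64-bit product in $ac1 is extracted with
   a 16-bit right shift. */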
static inline int MULT16_32_Q16(int a, int b)
{
    int c;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (c): "i" (16));
    return c;
}

#undef MULT16_32_P16
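/* (a * b) >> 16 with rounding: EXTR_R.W adds 1 << 15 to the product
   before shifting. */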
static inline int MULT16_32_P16(int a, int b)
{
    int c;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR_R.W %0,$ac1, %1" : "=r" (c): "i" (16));
    return c;
}

#undef MULT16_32_Q15
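/* (a * b) >> 15, truncating. */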
static inline int MULT16_32_Q15(int a, int b)
{
    int c;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (c): "i" (15));
    return c;
}

#undef MULT32_32_Q31
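/* (a * b) >> 31: Q31 x Q31 -> Q31 multiply via the 64-bit accumulator. */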
static inline int MULT32_32_Q31(int a, int b)
{
    int r;
    asm volatile("MULT $ac1, %0, %1" : : "r" (a), "r" (b));
    asm volatile("EXTR.W %0,$ac1, %1" : "=r" (r): "i" (31));
    return r;
}

#undef PSHR32
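/* Arithmetic shift right with rounding, roughly
   (a + (1 << (shift - 1))) >> shift for shift > 0; SHRAV_R.W takes the
   shift amount from a register. */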
static inline int PSHR32(int a, int shift)
{
    int r;
    asm volatile ("SHRAV_R.W %0, %1, %2" :"=r" (r): "r" (a), "r" (shift));
    return r;
}

#undef MULT16_16_P15
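/* (a * b + 16384) >> 15: the 16x16 product fits in 32 bits, so a plain
   mul followed by a rounding shift (SHRA_R.W) suffices. */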
static inline int MULT16_16_P15(int a, int b)
{
    int r;
    asm volatile ("mul %0, %1, %2" :"=r" (r): "r" (a), "r" (b));
    asm volatile ("SHRA_R.W %0, %1, %2" : "+r" (r):  "0" (r), "i"(15));
    return r;
}

#endif /* CELT_FIXED_GENERIC_MIPSR1_H */