|
/*
 * LIBOIL - Library of Optimized Inner Loops
 * Copyright (c) 2005 David A. Schleef <ds@schleef.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
// Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
|
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <math.h>

#include <liboil/liboil.h>
#include <liboil/liboilfunction.h>
#include <liboil/liboilclasses.h>
|
#if 0
static void
add_f32_unroll4 (float *dest, float *src1, float *src2, int n)
{
  int i;

  for(i=0;i<(n&(~0x3));i+=4){
    dest[i+0] = src1[i+0] + src2[i+0];
    dest[i+1] = src1[i+1] + src2[i+1];
    dest[i+2] = src1[i+2] + src2[i+2];
    dest[i+3] = src1[i+3] + src2[i+3];
  }
  for(;i<n;i++){
    dest[i] = src1[i] + src2[i];
  }
}
OIL_DEFINE_IMPL (add_f32_unroll4, add_f32);
#endif
|
#if 0
static void
add_f32_unroll4b (float *dest, float *src1, float *src2, int n)
{
  int i;

  for(i=0;i<(n&(~0x3));i+=4){
    *dest++ = *src1++ + *src2++;
    *dest++ = *src1++ + *src2++;
    *dest++ = *src1++ + *src2++;
    *dest++ = *src1++ + *src2++;
  }
  for(;i<n;i++){
    *dest++ = *src1++ + *src2++;
  }
}
OIL_DEFINE_IMPL (add_f32_unroll4b, add_f32);
#endif
|
#if 0
static void
multiply_f32_unroll4 (float *dest, float *src1, float *src2, int n)
{
  int i;

  for(i=0;i<(n&(~0x3));i+=4){
    dest[i+0] = src1[i+0] * src2[i+0];
    dest[i+1] = src1[i+1] * src2[i+1];
    dest[i+2] = src1[i+2] * src2[i+2];
    dest[i+3] = src1[i+3] * src2[i+3];
  }
  for(;i<n;i++){
    dest[i] = src1[i] * src2[i];
  }
}
OIL_DEFINE_IMPL (multiply_f32_unroll4, multiply_f32);
#endif
|
/* scalaradd_f32_ns: add the scalar *src2 to each element of src1,
 * unrolled four elements per iteration. */
static void
scalaradd_f32_ns_unroll4 (float *dest, float *src1, float *src2, int n)
{
  int i;

  /* main loop: four elements per iteration */
  for(i=0;i<(n&(~0x3));i+=4){
    dest[i+0] = src1[i+0] + src2[0];
    dest[i+1] = src1[i+1] + src2[0];
    dest[i+2] = src1[i+2] + src2[0];
    dest[i+3] = src1[i+3] + src2[0];
  }
  /* tail loop: remaining 0-3 elements */
  for(;i<n;i++){
    dest[i] = src1[i] + src2[0];
  }
}
OIL_DEFINE_IMPL (scalaradd_f32_ns_unroll4, scalaradd_f32_ns);
|
/* scalarmultiply_f32_ns: multiply each element of src1 by the scalar *src2,
 * unrolled four elements per iteration. */
static void
scalarmultiply_f32_ns_unroll4 (float *dest, float *src1, float *src2, int n)
{
  int i;

  for(i=0;i<(n&(~0x3));i+=4){
    dest[i+0] = src1[i+0] * src2[0];
    dest[i+1] = src1[i+1] * src2[0];
    dest[i+2] = src1[i+2] * src2[0];
    dest[i+3] = src1[i+3] * src2[0];
  }
  for(;i<n;i++){
    dest[i] = src1[i] * src2[0];
  }
}
OIL_DEFINE_IMPL (scalarmultiply_f32_ns_unroll4, scalarmultiply_f32_ns);
|
/* scalarmultiply_f64_ns: double-precision variant of the above. */
static void
scalarmultiply_f64_ns_unroll4 (double *dest, double *src1, double *src2, int n)
{
  int i;

  for(i=0;i<(n&(~0x3));i+=4){
    dest[i+0] = src1[i+0] * src2[0];
    dest[i+1] = src1[i+1] * src2[0];
    dest[i+2] = src1[i+2] * src2[0];
    dest[i+3] = src1[i+3] * src2[0];
  }
  for(;i<n;i++){
    dest[i] = src1[i] * src2[0];
  }
}
OIL_DEFINE_IMPL (scalarmultiply_f64_ns_unroll4, scalarmultiply_f64_ns);
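
/*
 * Illustrative sketch, not part of the original file: how callers normally
 * reach the scalar classes implemented above.  Applications do not call the
 * _unroll4 functions directly; they call liboil's generated entry points
 * (assumed here to be oil_scalaradd_f32_ns() and oil_scalarmultiply_f32_ns(),
 * with oil_init() doing one-time setup), and the liboil dispatcher picks the
 * fastest registered implementation, possibly one of the unroll4 variants in
 * this file.
 */
#if 0
#include <liboil/liboil.h>

static void
example_scale_and_offset (float *dest, float *tmp, float *src, int n)
{
  float gain = 0.5f;
  float bias = 1.0f;

  oil_init ();                                     /* select implementations */
  oil_scalarmultiply_f32_ns (tmp, src, &gain, n);  /* tmp[i]  = src[i] * gain */
  oil_scalaradd_f32_ns (dest, tmp, &bias, n);      /* dest[i] = tmp[i] + bias */
}
#endif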
|
#if 0
#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_add_f32_unroll4() {
  return &_oil_function_impl_add_f32_unroll4;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_add_f32_unroll4b() {
  return &_oil_function_impl_add_f32_unroll4b;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_multiply_f32_unroll4() {
  return &_oil_function_impl_multiply_f32_unroll4;
}
#endif

#endif
|
/* On Symbian, the implementation records created by OIL_DEFINE_IMPL above
 * are exposed through accessor functions. */
#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_scalaradd_f32_ns_unroll4() {
  return &_oil_function_impl_scalaradd_f32_ns_unroll4;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_scalarmultiply_f32_ns_unroll4() {
  return &_oil_function_impl_scalarmultiply_f32_ns_unroll4;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_scalarmultiply_f64_ns_unroll4() {
  return &_oil_function_impl_scalarmultiply_f64_ns_unroll4;
}
#endif