/*
 * Copyright (c) 2005
 * Eric Anholt. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
// Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <liboil/liboilclasses.h>
#include <liboil/liboilfunction.h>
#include "liboil/liboilcolorspace.h"

#define COMPOSITE_ADD(d,s) oil_clamp_255((d) + (s))
#define COMPOSITE_OVER(d,s,m) ((d) + (s) - oil_muldiv_255((d),(m)))

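/* Added implementation note (editorial, not from the original authors):
 * every routine below treats a 32-bit pixel as two 16-bit lanes packed in
 * one register ("SIMD within a register").  The even bytes and the odd
 * bytes are isolated with 0x00ff00ff masks, each lane is multiplied by an
 * 8-bit factor, renormalized with a rounded divide-by-255, and the two
 * halves are recombined.  The COMPOSITE_ADD/COMPOSITE_OVER macros, built on
 * the scalar helpers pulled in through liboilcolorspace.h, are only used by
 * the byte-at-a-time tail loops of the u8 variants further down.
 */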
static void
composite_in_argb_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  for (; n > 0; n--) {
    uint32_t src1, src2;
    uint8_t m = *mask++;

    /* Split the pixel into two sets of two channels, and multiply by the
     * mask.
     */
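    /* Added note: the multiply leaves a 16-bit product in each lane; adding
     * 0x80 per lane and folding the high byte back in before the final shift
     * is a rounded division by 255.  For example, t = 0x80 * 0xff = 0x7f80:
     * t + 0x80 = 0x8000, plus (0x8000 >> 8) gives 0x8080, and >> 8 yields
     * 0x80, i.e. round(0x7f80 / 255).
     */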
    src1 = *src & 0x00ff00ff;
    src1 *= m;
    src1 += 0x00800080;
    src1 += (src1 >> 8) & 0x00ff00ff;
    src1 >>= 8;
    src1 &= 0x00ff00ff;

    src2 = (*src >> 8) & 0x00ff00ff;
    src2 *= m;
    src2 += 0x00800080;
    src2 += (src2 >> 8) & 0x00ff00ff;
    src2 &= 0xff00ff00;

    *dest++ = src1 | src2;
    src++;
  }
}
OIL_DEFINE_IMPL (composite_in_argb_fast, composite_in_argb);

static void
composite_in_argb_const_src_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
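  /* Added note: with a constant source the pixel is split into its two
   * lanes once, outside the loop; only the multiply by the per-pixel mask
   * and the renormalization remain inside it.
   */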
  uint32_t src1, src2;

  src1 = *src & 0x00ff00ff;
  src2 = (*src >> 8) & 0x00ff00ff;

  for (; n > 0; n--) {
    uint32_t temp1, temp2;
    uint8_t m = *mask++;

    /* Split the pixel into two sets of two channels, and multiply by the
     * mask.
     */
    temp1 = src1 * m;
    temp1 += 0x00800080;
    temp1 += (temp1 >> 8) & 0x00ff00ff;
    temp1 >>= 8;
    temp1 &= 0x00ff00ff;

    temp2 = src2 * m;
    temp2 += 0x00800080;
    temp2 += (temp2 >> 8) & 0x00ff00ff;
    temp2 &= 0xff00ff00;

    *dest++ = temp1 | temp2;
  }
}
OIL_DEFINE_IMPL (composite_in_argb_const_src_fast, composite_in_argb_const_src);

static void
composite_in_argb_const_mask_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint8_t m = *mask;

  for (; n > 0; n--) {
    uint32_t src1, src2;

    /* Split the pixel into two sets of two channels, and multiply by the
     * mask.
     */
    src1 = *src & 0x00ff00ff;
    src1 *= m;
    src1 += 0x00800080;
    src1 += (src1 >> 8) & 0x00ff00ff;
    src1 >>= 8;
    src1 &= 0x00ff00ff;

    src2 = (*src >> 8) & 0x00ff00ff;
    src2 *= m;
    src2 += 0x00800080;
    src2 += (src2 >> 8) & 0x00ff00ff;
    src2 &= 0xff00ff00;

    *dest++ = src1 | src2;
    src++;
  }
}
OIL_DEFINE_IMPL (composite_in_argb_const_mask_fast,
    composite_in_argb_const_mask);

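/* Added note: OVER keeps the destination scaled by the complement of the
 * source alpha (sa = ~s >> 24 is 255 - alpha) and then adds the
 * premultiplied source with a lane-wise saturating add: after "d += s", any
 * lane that carried into bit 8 has 0x0100 - 1 = 0x00ff ORed in to clamp it
 * at 0xff, while lanes that did not carry get 0x0100, which the final
 * 0x00ff00ff mask removes again.
 */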
static void
composite_over_argb_fast (uint32_t *dest, const uint32_t *src, int n)
{
  for (; n > 0; n--) {
    uint32_t d = *dest, s = *src, sa;
    uint32_t s1, s2, d1, d2;

    sa = ~s >> 24;

    s1 = s & 0x00ff00ff;
    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    s2 = (s >> 8) & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
    src++;
  }
}
OIL_DEFINE_IMPL (composite_over_argb_fast, composite_over_argb);

static void
composite_over_argb_const_src_fast (uint32_t *dest, const uint32_t *src, int n)
{
  uint32_t s = *src;
  uint32_t sa, s1, s2;
  sa = ~s >> 24;

  s1 = s & 0x00ff00ff;
  s2 = (s >> 8) & 0x00ff00ff;

  for (; n > 0; n--) {
    uint32_t d = *dest;
    uint32_t d1, d2;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
    src++;
  }
}
OIL_DEFINE_IMPL (composite_over_argb_const_src_fast,
    composite_over_argb_const_src);

static void
composite_add_argb_fast (uint32_t *dest, const uint32_t *src, int n)
{
  for (; n > 0; n--) {
    uint32_t s = *src++, d = *dest;
    uint32_t s1, s2, d1, d2;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;
    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_add_argb_fast, composite_add_argb);

static void
composite_add_argb_const_src_fast (uint32_t *dest, const uint32_t *src, int n)
{
  uint32_t s1, s2;

  s1 = *src & 0x00ff00ff;
  s2 = (*src >> 8) & 0x00ff00ff;
  for (; n > 0; n--) {
    uint32_t d = *dest;
    uint32_t d1, d2;

    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_add_argb_const_src_fast,
    composite_add_argb_const_src);

static void
composite_in_over_argb_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
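  /* Added note: IN_OVER multiplies the source pixel by the 8-bit mask
   * ("in") and then composites that result over the destination, so one
   * pass does the work of a composite_in followed by a composite_over.
   */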
  for (; n > 0; n--) {
    uint32_t d = *dest, s = *src++;
    uint32_t s1, s2, d1, d2, sa;
    uint8_t m = *mask++;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;

    /* in */
    s1 *= m;
    s1 += 0x00800080;
    s1 += (s1 >> 8) & 0x00ff00ff;
    s1 >>= 8;
    s1 &= 0x00ff00ff;

    s2 *= m;
    s2 += 0x00800080;
    s2 += (s2 >> 8) & 0x00ff00ff;
    s2 >>= 8;
    s2 &= 0x00ff00ff;

    /* over */
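    /* Added note: after the "in" step the masked alpha byte sits in bits
     * 16-23 of s2, so its complement is read from there instead of from
     * the top byte of the unmasked pixel.
     */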
    sa = (~s2 >> 16) & 0xff;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_in_over_argb_fast, composite_in_over_argb);

static void
composite_in_over_argb_const_src_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint32_t s = *src;
  uint32_t s1, s2;

  s1 = s & 0x00ff00ff;
  s2 = (s >> 8) & 0x00ff00ff;

  for (; n > 0; n--) {
    uint32_t d = *dest;
    uint32_t temp1, temp2, d1, d2, sa;
    uint8_t m = *mask++;

    /* in */
    temp1 = s1 * m;
    temp1 += 0x00800080;
    temp1 += (temp1 >> 8) & 0x00ff00ff;
    temp1 >>= 8;
    temp1 &= 0x00ff00ff;

    temp2 = s2 * m;
    temp2 += 0x00800080;
    temp2 += (temp2 >> 8) & 0x00ff00ff;
    temp2 >>= 8;
    temp2 &= 0x00ff00ff;

    /* over */
    sa = (~temp2 >> 16) & 0xff;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += temp1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += temp2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_in_over_argb_const_src_fast,
    composite_in_over_argb_const_src);

static void
composite_in_over_argb_const_mask_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint8_t m = *mask;
  for (; n > 0; n--) {
    uint32_t d = *dest, s = *src++;
    uint32_t s1, s2, d1, d2, sa;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;

    /* in */
    s1 *= m;
    s1 += 0x00800080;
    s1 += (s1 >> 8) & 0x00ff00ff;
    s1 >>= 8;
    s1 &= 0x00ff00ff;

    s2 *= m;
    s2 += 0x00800080;
    s2 += (s2 >> 8) & 0x00ff00ff;
    s2 >>= 8;
    s2 &= 0x00ff00ff;

    /* over */
    sa = (~s2 >> 16) & 0xff;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_in_over_argb_const_mask_fast,
    composite_in_over_argb_const_mask);

#ifdef HAVE_UNALIGNED_ACCESS
static void
composite_add_u8_fast (uint8_t *dest, const uint8_t *src, int n)
{
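  /* Added note: when unaligned 32-bit loads are safe, four bytes are added
   * per iteration with the same lane-wise saturating add used for the ARGB
   * paths; the trailing loop handles the 0-3 leftover bytes one at a time
   * with the scalar COMPOSITE_ADD macro.
   */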
  for (; n > 3; n -= 4) {
    uint32_t s = *(uint32_t *)src, d = *(uint32_t *)dest;
    uint32_t s1, s2, d1, d2;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;
    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *(uint32_t *)dest = d1 | (d2 << 8);
    src += 4;
    dest += 4;
  }
  for (; n > 0; n--) {
    *dest = COMPOSITE_ADD(*dest, *src);
    src++;
    dest++;
  }
}
OIL_DEFINE_IMPL (composite_add_u8_fast, composite_add_u8);
#endif

#ifdef HAVE_UNALIGNED_ACCESS
static void
composite_add_u8_const_src_fast (uint8_t *dest, const uint8_t *src, int n)
{
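  /* Added note: the constant source byte is replicated into bits 0-7 and
   * 16-23 below, so the same word can be added to both byte-interleaved
   * halves of each destination word.
   */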
  uint32_t s;

  s = *src | (*src << 16);
  for (; n > 3; n -= 4) {
    uint32_t d = *(uint32_t *)dest;
    uint32_t d1, d2;

    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *(uint32_t *)dest = d1 | (d2 << 8);
    dest += 4;
  }
  for (; n > 0; n--) {
    *dest = COMPOSITE_ADD(*dest, *src);
    dest++;
  }
}
OIL_DEFINE_IMPL (composite_add_u8_const_src_fast, composite_add_u8_const_src);
#endif

#ifdef HAVE_UNALIGNED_ACCESS
static void
composite_over_u8_fast (uint8_t *dest, const uint8_t *src, int n)
{
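  /* Added note: for 8-bit OVER every source byte acts as its own coverage
   * value, so each destination byte must be scaled by a different
   * (255 - src) factor.  The two bytes of each half-word are therefore
   * multiplied separately and recombined with OR before the usual rounded
   * divide-by-255 and saturating add of the source.
   */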
  for (; n > 3; n -= 4) {
    uint32_t d = *(uint32_t *)dest, s = *(uint32_t *)src;
    uint32_t d1, d2, s1, s2;

    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;
    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;

    d1 = ((d1 & 0xff) * (~s1 & 0xff)) |
        ((d1 & 0x00ff0000) * (~s1 >> 16 & 0xff));
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = ((d2 & 0xff) * (~s2 & 0xff)) |
        ((d2 & 0x00ff0000) * (~s2 >> 16 & 0xff));
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *(uint32_t *)dest = d1 | (d2 << 8);
    dest += 4;
    src += 4;
  }
  for (; n > 0; n--) {
    *dest = COMPOSITE_OVER(*dest, *src, *src);
    dest++;
    src++;
  }
}
OIL_DEFINE_IMPL (composite_over_u8_fast, composite_over_u8);
#endif


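/* Added note: on Symbian the implementation records created by
 * OIL_DEFINE_IMPL above are additionally exposed through the accessor
 * functions below, presumably so they can be retrieved explicitly at
 * runtime on that platform.
 */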
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_fast() {
  return &_oil_function_impl_composite_in_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_const_src_fast() {
  return &_oil_function_impl_composite_in_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_const_mask_fast() {
  return &_oil_function_impl_composite_in_argb_const_mask_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_fast() {
  return &_oil_function_impl_composite_over_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_const_src_fast() {
  return &_oil_function_impl_composite_over_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_argb_fast() {
  return &_oil_function_impl_composite_add_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_argb_const_src_fast() {
  return &_oil_function_impl_composite_add_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_fast() {
  return &_oil_function_impl_composite_in_over_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_const_src_fast() {
  return &_oil_function_impl_composite_in_over_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_const_mask_fast() {
  return &_oil_function_impl_composite_in_over_argb_const_mask_fast;
}
#endif

#ifdef HAVE_UNALIGNED_ACCESS
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_u8_fast() {
  return &_oil_function_impl_composite_add_u8_fast;
}
#endif
#endif

#ifdef HAVE_UNALIGNED_ACCESS
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_u8_const_src_fast() {
  return &_oil_function_impl_composite_add_u8_const_src_fast;
}
#endif
#endif

#ifdef HAVE_UNALIGNED_ACCESS
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_u8_fast() {
  return &_oil_function_impl_composite_over_u8_fast;
}
#endif
#endif