// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\klib\x86\cumem.cia
//
//

#include <x86.h>

extern "C" {

__NAKED__ void CopyInterSeg()
//
// Copy ECX bytes from DS:ESI to ES:EDI
// Modifies EAX, EBX, ECX, EDX, ESI, EDI
//
    {
    asm("pushfd");
    asm("cld");                 // assume forward copy initially
    asm("test ecx,ecx");        //
    asm("jz short memcopy0");   // if length=0, nothing to do
    asm("xor edx,edx");         //
    asm("cmp edi,esi");         // compare source and dest addresses
    asm("jc short memcopy1");   // if dest<source, must go forwards
    asm("std");                 // else go backwards
    asm("add esi,ecx");         // and start at end of block
    asm("add edi,ecx");         //
    asm("inc edx");             // edx=1 if backwards, 0 if forwards
    asm("memcopy1:");
    asm("cmp ecx,16");          // if length<16 don't bother with alignment check
    asm("jc short memcopy2");   //
    asm("mov ebx,edi");         // ebx = destination address
    asm("and ebx,3");           // ebx bottom 2 bits = alignment of destination wrt hardware bus
    asm("jz short memcopy3");   // if aligned, proceed with block move
    asm("or edx,edx");          // check direction of move
    asm("jnz short memcopy4");  // if backwards, ebx = number of byte moves to align destination
    asm("neg ebx");             // else number of byte moves = 4-ebx
    asm("add ebx,4");           //
    asm("memcopy4:");
    asm("sub ecx,ebx");         // subtract number of bytes from length
    asm("xchg ecx,ebx");        // temporarily put length in ebx
    asm("sub edi,edx");         // adjust if backwards move
    asm("sub esi,edx");         //
    asm("rep movsb");           // move bytes to align destination
    asm("add edi,edx");         // adjust if backwards move
    asm("add esi,edx");         //
    asm("mov ecx,ebx");         // length back into ecx
    asm("memcopy3:");
    asm("mov ebx,ecx");         // save length in ebx
    asm("shl edx,2");           // adjustment 4 for backwards move
    asm("shr ecx,2");           // number of dwords to move into ecx
    asm("sub edi,edx");         // adjust if backwards move
    asm("sub esi,edx");         //
    asm("rep movsd");           // perform DWORD block move
    asm("add edi,edx");         // adjust if backwards move
    asm("add esi,edx");         //
    asm("mov ecx,ebx");         // length back into ecx
    asm("and ecx,3");           // number of remaining bytes to move
    asm("jz short memcopy0");   // if zero, we are finished
    asm("shr edx,2");           // adjustment 1 for backwards move
    asm("memcopy2:");           // *** come here for small move
    asm("sub edi,edx");         // adjust if backwards move
    asm("sub esi,edx");         //
    asm("rep movsb");           // move remaining bytes
    asm("memcopy0:");
    asm("popfd");
    asm("ret");                 // finished - return value in EAX
    }
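
/*
Illustrative sketch (comment only, not compiled): a C-level outline of the
CopyInterSeg algorithm above, using flat pointers in place of the DS:ESI and
ES:EDI segment:offset pairs. It follows the same decisions the assembly makes:
choose the direction from the address comparison, byte-copy until the
destination is dword aligned (only worth doing for blocks of 16 bytes or more),
move whole dwords, then copy the remaining bytes. CopyInterSegSketch is an
illustrative name only.

    static void CopyInterSegSketch(TUint8* d, const TUint8* s, TUint len)
        {
        if (len == 0)
            return;
        TBool backwards = (d >= s);         // matches the cmp edi,esi test
        if (backwards)
            { d += len; s += len; }         // start at the end of the block
        if (len >= 16)
            {
            while ((TUint)d & 3)            // byte moves to align the destination
                { if (backwards) *--d = *--s; else *d++ = *s++; --len; }
            while (len >= 4)                // dword block move
                {
                if (backwards) { d -= 4; s -= 4; *(TUint32*)d = *(const TUint32*)s; }
                else           { *(TUint32*)d = *(const TUint32*)s; d += 4; s += 4; }
                len -= 4;
                }
            }
        while (len--)                       // small copies and leftover bytes
            { if (backwards) *--d = *--s; else *d++ = *s++; }
        }
*/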

__NAKED__ void CopyInterSeg32()
//
// Copy ECX bytes from DS:ESI to ES:EDI
// ECX, ESI and EDI are all multiples of 4
// Modifies EAX, EBX, ECX, EDX, ESI, EDI
//
    {
    asm("pushfd");
    asm("cld");                 //
    asm("test ecx,ecx");        //
    asm("jz short memmove0");   // if length=0, nothing to do
    asm("cmp edi,esi");         // compare source and dest addresses
    asm("jc short memmove1");   // if dest<source, must go forwards
    asm("std");                 // else go backwards
    asm("lea esi,[esi+ecx-4]"); // and start at end of block - 4
    asm("lea edi,[edi+ecx-4]"); //
    asm("memmove1:");
    asm("shr ecx,2");           // ecx now contains number of dwords to move
    asm("rep movsd");           // do dword block move
    asm("memmove0:");
    asm("popfd");
    asm("ret");                 // finished - return value in EAX
    }

__NAKED__ void FillInterSeg()
//
// Fill ECX bytes at ES:EDI with AL
// Modifies EAX, ECX, EDX, EDI
//
    {
    asm("pushfd");
    asm("cld");                 // go forwards through array
    asm("test ecx,ecx");        //
    asm("jz short memfill0");   // if length zero, nothing to do
    asm("cmp ecx,8");           // if array very small, just do byte fills
    asm("jb short memfill1");

    asm("mov ah,al");           // repeat al in all bytes of eax
    asm("movzx edx,ax");        //
    asm("shl eax,16");          //
    asm("or eax,edx");          //
    asm("mov edx,ecx");         // length into edx
    // ecx = number of byte fills to align = 4-(edi mod 4)
    asm("mov ecx,4");
    asm("sub ecx,edi");         //
    asm("and ecx,3");           //
    asm("jz short memfill2");   // if already aligned, proceed to dword fill
    asm("sub edx,ecx");         // subtract alignment bytes from length
    asm("rep stosb");           // do byte fills to align
    asm("memfill2:");
    asm("mov ecx,edx");         // length remaining into ecx
    asm("shr ecx,2");           // number of dwords to fill into ecx
    asm("rep stosd");           // perform dword fill
    asm("mov ecx,edx");         // calculate number of leftover bytes
    asm("and ecx,3");           // in ecx
    asm("jz short memfill0");   // if none left, exit
    asm("memfill1:");
    asm("rep stosb");           // do byte fills to make up correct length
    asm("memfill0:");
    asm("popfd");
    asm("ret");
    }
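
/*
Illustrative sketch (comment only, not compiled): a C-level outline of
FillInterSeg, with a flat pointer standing in for ES:EDI. The byte value is
replicated into every byte of a dword, a few byte stores align the
destination, the bulk is filled a dword at a time, and any leftover bytes
(or very small fills) are done bytewise. FillInterSegSketch is an
illustrative name only.

    static void FillInterSegSketch(TUint8* d, TUint8 v, TUint len)
        {
        if (len >= 8)
            {
            TUint32 v4 = v * 0x01010101u;   // repeat v in all four bytes
            while ((TUint)d & 3)            // byte fills to align the destination
                { *d++ = v; --len; }
            for (; len >= 4; len -= 4, d += 4)
                *(TUint32*)d = v4;          // dword fill
            }
        while (len--)                       // small fills and leftover bytes
            *d++ = v;
        }
*/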


/** Reads the current thread's memory space with appropriate permissions.

Performs a memcpy(aKernAddr, aAddr, aLength).
The reads are performed using requestor privilege level from GS, ie equal
to the privilege level of the caller of the Exec:: function.
Note that source and destination areas may not overlap.

@param aKernAddr Destination address in kernel memory.
@param aAddr Source address in kernel or user memory.
@param aLength Number of bytes to copy.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void kumemget(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push ds");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov ax, gs");
    asm("mov ds, ax");
    asm("call %a0": : "i"(&CopyInterSeg));
    asm("pop ds");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }
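
/*
Illustrative usage (comment only, not compiled): copying an argument block
from the caller's address space inside an Exec:: handler. This assumes the
XTRAPD macro and XT_DEFAULT exception handler normally used by kernel-side
code to satisfy the XTRAP precondition; SArgs and aUserArgs are hypothetical
names.

    SArgs args;
    XTRAPD(r, XT_DEFAULT, kumemget(&args, aUserArgs, sizeof(args)));
    if (r != KErrNone)
        return KErrBadDescriptor;           // the copy took an exception
*/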


/** Reads the current thread's memory space with user permissions.

Performs a memcpy(aKernAddr, aUserAddr, aLength).
The reads are performed with ring 3 RPL.
Note that source and destination areas may not overlap.

@param aKernAddr Destination address in kernel memory.
@param aUserAddr Source address in user memory.
@param aLength Number of bytes to copy.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void umemget(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push ds");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov eax, %0": : "i"(RING3_DS));
    asm("mov ds, ax");
    asm("call %a0": : "i"(&CopyInterSeg));
    asm("pop ds");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }


/** Does a word-aligned read of the current thread's memory space with appropriate permissions.

Performs a memcpy(aKernAddr, aAddr, aLength).
The reads are performed using requestor privilege level from GS, ie equal
to the privilege level of the caller of the Exec:: function.
Note that source and destination areas may not overlap.

@param aKernAddr Destination address in kernel memory, must be 4-byte aligned.
@param aAddr Source address in kernel or user memory, must be 4-byte aligned.
@param aLength Number of bytes to copy, must be a multiple of 4.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void kumemget32(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push ds");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov ax, gs");
    asm("mov ds, ax");
    asm("call %a0": : "i"(&CopyInterSeg32));
    asm("pop ds");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }


/** Does a word-aligned read of the current thread's memory space with user permissions.

Performs a memcpy(aKernAddr, aUserAddr, aLength).
The reads are performed with ring 3 RPL.
Note that source and destination areas may not overlap.

@param aKernAddr Destination address in kernel memory, must be 4-byte aligned.
@param aUserAddr Source address in user memory, must be 4-byte aligned.
@param aLength Number of bytes to copy, must be a multiple of 4.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void umemget32(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push ds");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov eax, %0": : "i"(RING3_DS));
    asm("mov ds, ax");
    asm("call %a0": : "i"(&CopyInterSeg32));
    asm("pop ds");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }
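
/*
Illustrative usage (comment only, not compiled): a word-aligned read of an
array of TInts from user memory with forced ring 3 permissions. The kernel
buffer, the user address and the byte count are all multiples of 4 here, as
the function requires. aUserTable is a hypothetical name; XTRAPD/XT_DEFAULT
are assumed as above.

    TInt table[8];
    XTRAPD(r, XT_DEFAULT, umemget32(table, aUserTable, sizeof(table)));
*/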


/** Writes to the current thread's memory space with appropriate permissions.

Performs a memcpy(aAddr, aKernAddr, aLength).
The writes are performed using requestor privilege level from GS, ie equal
to the privilege level of the caller of the Exec:: function.
Note that source and destination areas may not overlap.

@param aAddr Destination address in kernel or user memory.
@param aKernAddr Source address in kernel memory.
@param aLength Number of bytes to copy.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void kumemput(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push es");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov ax, gs");
    asm("mov es, ax");
    asm("call %a0": : "i"(&CopyInterSeg));
    asm("pop es");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }


/** Writes to the current thread's memory space with user permissions.

Performs a memcpy(aUserAddr, aKernAddr, aLength).
The writes are performed with ring 3 RPL.
Note that source and destination areas may not overlap.

@param aUserAddr Destination address in user memory.
@param aKernAddr Source address in kernel memory.
@param aLength Number of bytes to copy.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void umemput(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push es");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov eax, %0": : "i"(RING3_DS));
    asm("mov es, ax");
    asm("call %a0": : "i"(&CopyInterSeg));
    asm("pop es");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }
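
/*
Illustrative usage (comment only, not compiled): writing a kernel-side result
back to a user-supplied buffer. Using the ring 3 variant means a bad user
pointer faults (and is trapped) instead of silently overwriting kernel
memory. aUserBuf and result are hypothetical names; XTRAPD/XT_DEFAULT are
assumed as above.

    XTRAPD(r, XT_DEFAULT, umemput(aUserBuf, &result, sizeof(result)));
*/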


/** Does a word-aligned write to the current thread's memory space with appropriate permissions.

Performs a memcpy(aAddr, aKernAddr, aLength).
The writes are performed using requestor privilege level from GS, ie equal
to the privilege level of the caller of the Exec:: function.
Note that source and destination areas may not overlap.

@param aAddr Destination address in kernel or user memory, must be 4-byte aligned.
@param aKernAddr Source address in kernel memory, must be 4-byte aligned.
@param aLength Number of bytes to copy, must be a multiple of 4.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void kumemput32(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push es");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov ax, gs");
    asm("mov es, ax");
    asm("call %a0": : "i"(&CopyInterSeg32));
    asm("pop es");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }


/** Does a word-aligned write to the current thread's memory space with user permissions.

Performs a memcpy(aUserAddr, aKernAddr, aLength).
The writes are performed with ring 3 RPL.
Note that source and destination areas may not overlap.

@param aUserAddr Destination address in user memory, must be 4-byte aligned.
@param aKernAddr Source address in kernel memory, must be 4-byte aligned.
@param aLength Number of bytes to copy, must be a multiple of 4.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void umemput32(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push es");
    asm("mov edi, [esp+20]");
    asm("mov esi, [esp+24]");
    asm("mov ecx, [esp+28]");
    asm("mov eax, %0": : "i"(RING3_DS));
    asm("mov es, ax");
    asm("call %a0": : "i"(&CopyInterSeg32));
    asm("pop es");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }


/** Fills the current thread's memory space with appropriate permissions.

Performs a memset(aAddr, aValue, aLength).
The writes are performed using requestor privilege level from GS, ie equal
to the privilege level of the caller of the Exec:: function.

@param aAddr Destination address in kernel or user memory.
@param aValue Value to write to each byte.
@param aLength Number of bytes to fill.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void kumemset(TAny* /*aAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push es");
    asm("mov edi, [esp+12]");
    asm("mov eax, [esp+16]");
    asm("mov ecx, [esp+20]");
    asm("mov dx, gs");
    asm("mov es, dx");
    asm("call %a0": : "i"(&FillInterSeg));
    asm("pop es");
    asm("pop edi");
    asm("ret");
    }
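
/*
Illustrative usage (comment only, not compiled): zero-filling a
caller-supplied buffer with the caller's own permissions before partial
results are written into it. aUserBuf and aSize are hypothetical names;
XTRAPD/XT_DEFAULT are assumed as above.

    XTRAPD(r, XT_DEFAULT, kumemset(aUserBuf, 0, aSize));
*/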


/** Fills the current thread's memory space with user permissions.

Performs a memset(aUserAddr, aValue, aLength).
The writes are performed with ring 3 RPL.

@param aUserAddr Destination address in user memory.
@param aValue Value to write to each byte.
@param aLength Number of bytes to fill.

@pre Call in a thread context.
@pre Kernel must be unlocked.
@pre Must be called under an XTRAP harness, or calling thread must not be
     in a critical section.
*/
EXPORT_C __NAKED__ void umemset(TAny* /*aUserAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push es");
    asm("mov edi, [esp+12]");
    asm("mov eax, [esp+16]");
    asm("mov ecx, [esp+20]");
    asm("mov edx, %0": : "i"(RING3_DS));
    asm("mov es, dx");
    asm("call %a0": : "i"(&FillInterSeg));
    asm("pop es");
    asm("pop edi");
    asm("ret");
    }

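/** Does a word-aligned copy between two areas of the current thread's memory space.

Performs a memcpy(aUserDst, aUserSrc, aLength).
Both the source and the destination are accessed using the requestor privilege
level from GS, ie equal to the privilege level of the caller of the Exec:: function.
aUserDst, aUserSrc and aLength must all be multiples of 4.
*/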
__NAKED__ void uumemcpy32(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push ds");
    asm("push es");
    asm("mov edi, [esp+24]");
    asm("mov esi, [esp+28]");
    asm("mov ecx, [esp+32]");
    asm("mov ax, gs");
    asm("mov ds, ax");
    asm("mov es, ax");
    asm("call %a0": : "i"(&CopyInterSeg32));
    asm("pop es");
    asm("pop ds");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }

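/** Copies between two areas of the current thread's memory space.

Performs a memcpy(aUserDst, aUserSrc, aLength).
Both the source and the destination are accessed using the requestor privilege
level from GS, ie equal to the privilege level of the caller of the Exec:: function.
*/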
__NAKED__ void uumemcpy(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
    {
    asm("push edi");
    asm("push esi");
    asm("push ebx");
    asm("push ds");
    asm("push es");
    asm("mov edi, [esp+24]");
    asm("mov esi, [esp+28]");
    asm("mov ecx, [esp+32]");
    asm("mov ax, gs");
    asm("mov ds, ax");
    asm("mov es, ax");
    asm("call %a0": : "i"(&CopyInterSeg));
    asm("pop es");
    asm("pop ds");
    asm("pop ebx");
    asm("pop esi");
    asm("pop edi");
    asm("ret");
    }

}