/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed it collects the input byte stream into chunks of
 * 32-bit values and invokes the block function that performs the
 * actual hash calculation.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *      this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *      size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *      has to be at least 32 bits wide; if it's wider, then
 *      HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *      context structure that at least contains the following
 *      members:
 *              typedef struct {
 *                      ...
 *                      HASH_LONG       Nl,Nh;
 *                      HASH_LONG       data[HASH_LBLOCK];
 *                      unsigned int    num;
 *                      ...
 *                      } HASH_CTX;
 * HASH_UPDATE
 *      name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *      name of "Transform" function, implemented here.
 * HASH_FINAL
 *      name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *      name of "block" function treating *aligned* input message
 *      in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *      name of "block" function treating *unaligned* input message
 *      in original (data) byte order, implemented externally (it
 *      is actually optional if data and host are of the same
 *      "endianness").
 * HASH_MAKE_STRING
 *      macro converting context variables to the output digest string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *      defines host byte order.
 * HASH_LONG_LOG2
 *      defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *      assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *      alternative "block" function capable of treating
 *      aligned input message in original (data) order,
 *      implemented externally.
 *
 * MD5 example:
 *
 *      #define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *      #define HASH_LONG               MD5_LONG
 *      #define HASH_LONG_LOG2          MD5_LONG_LOG2
 *      #define HASH_CTX                MD5_CTX
 *      #define HASH_CBLOCK             MD5_CBLOCK
 *      #define HASH_LBLOCK             MD5_LBLOCK
 *      #define HASH_UPDATE             MD5_Update
 *      #define HASH_TRANSFORM          MD5_Transform
 *      #define HASH_FINAL              MD5_Final
 *      #define HASH_BLOCK_HOST_ORDER   md5_block_host_order
 *      #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
 *
 *                                      <appro@fy.chalmers.se>
 */
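
/*
 * The MD5 example above omits HASH_MAKE_STRING.  As a minimal sketch
 * (illustrative only, not a definition made here): for an MD5-style
 * context whose chaining variables are A, B, C and D, it might look
 * like the following, using HOST_l2c from further below:
 *
 *      #define HASH_MAKE_STRING(c,s)   do {    \
 *              unsigned long ll;               \
 *              ll=(c)->A; HOST_l2c(ll,(s));    \
 *              ll=(c)->B; HOST_l2c(ll,(s));    \
 *              ll=(c)->C; HOST_l2c(ll,(s));    \
 *              ll=(c)->D; HOST_l2c(ll,(s));    \
 *              } while (0)
 *
 * Each digest supplies its own definition in its private header.
 */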

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK     (HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2  2
#endif

/*
 * Engage a compiler-specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)   _lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)  __rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)  ( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)  __rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by a *constant* number of bits!  But that's exactly
   * what we need here...
   *                                    <appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                                asm (                   \
                                "roll %1,%0"            \
                                : "=r"(ret)             \
                                : "I"(n), "0"(a)        \
                                : "cc");                \
                           ret;                         \
                        })
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                                asm (                   \
                                "rlwinm %0,%1,%2,0,31"  \
                                : "=r"(ret)             \
                                : "r"(a), "I"(n));      \
                           ret;                         \
                        })
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2  /* Engage only if sizeof(HASH_LONG)== 4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)    (                                       \
                l=*(const HASH_LONG *)(a),                              \
                ((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))  \
                                )
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)    (                               \
                l=*(const HASH_LONG *)(a),                      \
                l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),    \
                ROTATE(l,16)                                    \
                                )
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *      - RISCs aren't good at long constants and have to explicitly
 *        compose 'em with several (well, usually 2) instructions in a
 *        register before performing the actual operation and (as you
 *        already realized:-) having the same constant should inspire
 *        the compiler to permanently allocate the only register for it;
 *      - most modern CPUs have two ALUs, but usually only one has
 *        circuitry for shifts:-( this minor tweak inspires the compiler
 *        to schedule shift instructions in a better way...
 *
 *                              <appro@fy.chalmers.se>
 */
#endif
#endif
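
/*
 * A quick worked example of the reversal: for l = 0x01020304,
 * ROTATE(l,8) is 0x02030401, which masked with 0x00FF00FF gives
 * 0x00030001, while ROTATE(l&0x00FF00FF,24) gives 0x04000200;
 * OR-ing the two yields 0x04030201, i.e. the byte-swapped word.
 */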

#ifndef ROTATE
#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
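
/*
 * ROTATE(a,n) is a left rotate of a 32-bit value by n bits, e.g.
 * ROTATE(0x12345678,8) yields 0x34567812.  The &0xffffffff mask in the
 * portable fallback keeps the low 32 bits of the result correct even
 * when HASH_LONG is wider than 32 bits and carries dirt above bit 31.
 */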

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *                              <appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED     HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#elif defined(L_ENDIAN)
#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED     HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)        ({ unsigned int r=*((const unsigned int *)(c)); \
                                   asm ("bswapl %0":"=r"(r):"0"(r));            \
                                   (c)+=4; (l)=r;                               })
#   define HOST_l2c(l,c)        ({ unsigned int r=(l);                          \
                                   asm ("bswapl %0":"=r"(r):"0"(r));            \
                                   *((unsigned int *)(c))=r; (c)+=4; r;         })
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))<<24),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))    ),          \
                         l)
#endif
#define HOST_p_c2l(c,l,n)       {                                       \
                        switch (n) {                                    \
                        case 0: l =((unsigned long)(*((c)++)))<<24;     \
                        case 1: l|=((unsigned long)(*((c)++)))<<16;     \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8;     \
                        case 3: l|=((unsigned long)(*((c)++)));         \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) {                                      \
                        switch (sc) {                                   \
                        case 0: l =((unsigned long)(*((c)++)))<<24;     \
                                if (--len == 0) break;                  \
                        case 1: l|=((unsigned long)(*((c)++)))<<16;     \
                                if (--len == 0) break;                  \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8;     \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       {                                       \
                        l=0; (c)+=n;                                    \
                        switch (n) {                                    \
                        case 3: l =((unsigned long)(*(--(c))))<< 8;     \
                        case 2: l|=((unsigned long)(*(--(c))))<<16;     \
                        case 1: l|=((unsigned long)(*(--(c))))<<24;     \
                                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)    )&0xff),      \
                         l)
#endif
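
/*
 * Illustration for the big-endian data order above: given the input
 * bytes 01 02 03 04, HOST_c2l reads them most-significant-first, so l
 * becomes 0x01020304, and HOST_l2c writes 0x01020304 back out as those
 * same four bytes.
 */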

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
   /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))    ),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<<24),          \
                         l)
#endif
#define HOST_p_c2l(c,l,n)       {                                       \
                        switch (n) {                                    \
                        case 0: l =((unsigned long)(*((c)++)));         \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8;     \
                        case 2: l|=((unsigned long)(*((c)++)))<<16;     \
                        case 3: l|=((unsigned long)(*((c)++)))<<24;     \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) {                                      \
                        switch (sc) {                                   \
                        case 0: l =((unsigned long)(*((c)++)));         \
                                if (--len == 0) break;                  \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8;     \
                                if (--len == 0) break;                  \
                        case 2: l|=((unsigned long)(*((c)++)))<<16;     \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       {                                       \
                        l=0; (c)+=n;                                    \
                        switch (n) {                                    \
                        case 3: l =((unsigned long)(*(--(c))))<<16;     \
                        case 2: l|=((unsigned long)(*(--(c))))<< 8;     \
                        case 1: l|=((unsigned long)(*(--(c))));         \
                                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)    )&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         l)
#endif
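
/*
 * Illustration for the little-endian data order above: the same input
 * bytes 01 02 03 04 are read least-significant-first, so HOST_c2l
 * yields l = 0x04030201.
 */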

#endif

/*
 * Time for some action:-)
 */
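
/*
 * Typical use goes through a concrete digest that includes this file;
 * with MD5 purely as an illustration:
 *
 *      MD5_CTX ctx;
 *      unsigned char md[MD5_DIGEST_LENGTH];
 *
 *      MD5_Init(&ctx);
 *      MD5_Update(&ctx, "abc", 3);     (implemented by HASH_UPDATE below)
 *      MD5_Final(md, &ctx);            (implemented by HASH_FINAL below)
 *
 * The "Init" routine lives in the digest's own source file; only the
 * Update/Transform/Final collectors are implemented here.
 */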

EXPORT_C int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
    {
    const unsigned char *data=data_;
    register HASH_LONG * p;
    register HASH_LONG l;
    size_t sw,sc,ew,ec;

    if (len==0) return 1;

    l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
    /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
     * Wei Dai <weidai@eskimo.com> for pointing it out. */
    if (l < c->Nl) /* overflow */
        c->Nh++;
    c->Nh+=(len>>29);   /* might cause compiler warning on 16-bit */
    c->Nl=l;

    if (c->num != 0)
        {
        p=c->data;
        sw=c->num>>2;
        sc=c->num&0x03;

        if ((c->num+len) >= HASH_CBLOCK)
            {
            l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
            for (; sw<HASH_LBLOCK; sw++)
                {
                HOST_c2l(data,l); p[sw]=l;
                }
            HASH_BLOCK_HOST_ORDER (c,p,1);
            len-=(HASH_CBLOCK-c->num);
            c->num=0;
            /* drop through and do the rest */
            }
        else
            {
            c->num+=(unsigned int)len;
            if ((sc+len) < 4) /* ugly, add chars to a word */
                {
                l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
                }
            else
                {
                ew=(c->num>>2);
                ec=(c->num&0x03);
                if (sc)
                    l=p[sw];
                HOST_p_c2l(data,l,sc);
                p[sw++]=l;
                for (; sw < ew; sw++)
                    {
                    HOST_c2l(data,l); p[sw]=l;
                    }
                if (ec)
                    {
                    HOST_c2l_p(data,l,ec); p[sw]=l;
                    }
                }
            return 1;
            }
        }

    sw=len/HASH_CBLOCK;
    if (sw > 0)
        {
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
        /*
         * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
         * only if sizeof(HASH_LONG)==4.
         */
        if ((((size_t)data)%4) == 0)
            {
            /* data is properly aligned so that we can cast it: */
            HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
            sw*=HASH_CBLOCK;
            data+=sw;
            len-=sw;
            }
        else
#if !defined(HASH_BLOCK_DATA_ORDER)
            while (sw--)
                {
                memcpy (p=c->data,data,HASH_CBLOCK);
                HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
                data+=HASH_CBLOCK;
                len-=HASH_CBLOCK;
                }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
            {
            HASH_BLOCK_DATA_ORDER(c,data,sw);
            sw*=HASH_CBLOCK;
            data+=sw;
            len-=sw;
            }
#endif
        }

    if (len!=0)
        {
        p = c->data;
        c->num = len;
        ew=len>>2;  /* words to copy */
        ec=len&0x03;
        for (; ew; ew--,p++)
            {
            HOST_c2l(data,l); *p=l;
            }
        HOST_c2l_p(data,l,ec);
        *p=l;
        }
    return 1;
    }


EXPORT_C void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
    {
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
    if ((((size_t)data)%4) == 0)
        /* data is properly aligned so that we can cast it: */
        HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
    else
#if !defined(HASH_BLOCK_DATA_ORDER)
        {
        memcpy (c->data,data,HASH_CBLOCK);
        HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
        }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
    HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
    }

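
/*
 * HASH_FINAL below appends the usual MD-style padding to whatever is
 * buffered in c->data: a single 0x80 byte, zero bytes up to the last
 * two words of the block (spilling into an extra block if there is no
 * room left for them), and finally the 64-bit message length in bits
 * taken from Nh/Nl, stored in data byte order.  For example, the
 * 3-byte message "abc" hashed with a little-endian digest such as MD5
 * ends up with a final block of 61 62 63 80 00 ... 00 whose last eight
 * bytes encode the length 0x18 (24 bits).
 */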
EXPORT_C int HASH_FINAL (unsigned char *md, HASH_CTX *c)
    {
    register HASH_LONG *p;
    register unsigned long l;
    register int i,j;
    static const unsigned char end[4]={0x80,0x00,0x00,0x00};
    const unsigned char *cp=end;

    /* c->num should definitely have room for at least one more byte. */
    p=c->data;
    i=c->num>>2;
    j=c->num&0x03;

#if 0
    /* purify often complains about the following line as an
     * Uninitialized Memory Read. While this can be true, the
     * following p_c2l macro will reset l when that case is true.
     * This is because j&0x03 contains the number of 'valid' bytes
     * already in p[i]. If and only if j&0x03 == 0, the UMR will
     * occur but this is also the only time p_c2l will do
     * l= *(cp++) instead of l|= *(cp++)
     * Many thanks to Alex Tang <altitude@cic.net> for picking up this
     * 'potential bug' */
#ifdef PURIFY
    if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
    l=p[i];
#else
    l = (j==0) ? 0 : p[i];
#endif
    HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

    if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
        {
        if (i<HASH_LBLOCK) p[i]=0;
        HASH_BLOCK_HOST_ORDER (c,p,1);
        i=0;
        }
    for (; i<(HASH_LBLOCK-2); i++)
        p[i]=0;

#if defined(DATA_ORDER_IS_BIG_ENDIAN)
    p[HASH_LBLOCK-2]=c->Nh;
    p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
    p[HASH_LBLOCK-2]=c->Nl;
    p[HASH_LBLOCK-1]=c->Nh;
#endif
    HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
    HASH_MAKE_STRING(c,md);
#endif

    c->num=0;
    /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
     * but I'm not worried :-)
    OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
     */
    return 1;
    }

#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to the common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG: doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if the 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only the 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping the 32 MSBs zeroed, resulting in a 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *                              <appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *                              <appro@fy.chalmers.se>
 */
#endif