2 ---------------------------------------------------------------------------
3 Copyright (c) 1998-2010, Brian Gladman, Worcester, UK. All rights reserved.
5 The redistribution and use of this software (with or without changes)
6 is allowed without the payment of fees or royalties provided that:
8 source code distributions include the above copyright notice, this
9 list of conditions and the following disclaimer;
11 binary distributions include the above copyright notice, this list
12 of conditions and the following disclaimer in their documentation.
14 This software is provided 'as is' with no explicit or implied warranties
15 in respect of its operation, including, but not limited to, correctness
16 and fitness for purpose.
17 ---------------------------------------------------------------------------
18 Issue Date: 20/12/2007
21 #include <string.h> /* for memcpy() etc. */
24 #include "brg_endian.h"
/* NOTE(review): extraction has dropped lines in this region (the matching */
/* #endif lines and, presumably, an extern "C" linkage block) — the        */
/* directives below therefore appear unbalanced in this extract.           */
26 #if defined(__cplusplus)
31 #if defined( _MSC_VER ) && ( _MSC_VER > 800 )
32 #pragma intrinsic(memcpy)
33 #pragma intrinsic(memset)
36 #if 0 && defined(_MSC_VER)
/* 32-bit rotate left/right by n bits (callers pass n in 1..31, so the */
/* (32 - n) shift never reaches the UB shift-by-32 case)               */
40 #define rotl32(x,n) (((x) << n) | ((x) >> (32 - n)))
41 #define rotr32(x,n) (((x) >> n) | ((x) << (32 - n)))
/* reverse the byte order of a 32-bit word using two masked rotations */
44 #if !defined(bswap_32)
45 #define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
48 #if (PLATFORM_BYTE_ORDER == IS_LITTLE_ENDIAN)
54 #if defined(SWAP_BYTES)
/* in-place byte swap of n 32-bit words at p — this is the body of the */
/* bsw_32(p,n) macro; its #define line is not visible in this extract  */
56 { int _i = (n); while(_i--) ((uint32_t*)p)[_i] = bswap_32(((uint32_t*)p)[_i]); }
/* mask for reducing a byte count modulo the 64-byte block size */
61 #define SHA1_MASK (SHA1_BLOCK_SIZE - 1)
/* SHA-1 round boolean functions: ch is used for rounds 0-19, parity for  */
/* rounds 20-39 and 60-79, maj for rounds 40-59 (see the round constants  */
/* passed to five_cycle below). These are the textbook forms.             */
65 #define ch(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
66 #define parity(x,y,z) ((x) ^ (y) ^ (z))
67 #define maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
69 #else /* Discovered by Rich Schroeppel and Colin Plumb */
/* equivalent forms of ch and maj that need one fewer boolean operation */
71 #define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
72 #define parity(x,y,z) ((x) ^ (y) ^ (z))
73 #define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
77 /* Compile 64 bytes of hash data into SHA1 context. Note */
78 /* that this routine assumes that the byte order in the */
79 /* ctx->wbuf[] at this point is in such an order that low */
80 /* address bytes in the ORIGINAL byte stream will go in */
81 /* this buffer to the high end of 32-bit words on BOTH big */
82 /* and little endian systems */
/* One SHA-1 round: e += rotl(a,5) + f(b,c,d) + k + w; b = rotl(b,30).   */
/* rotr32(x,27) == rotl32(x,5) and rotr32(x,2) == rotl32(x,30). The q()  */
/* accessor macro is defined on lines not visible in this extract —      */
/* presumably it selects between the v[] array and the v0..v4 scalars    */
/* used in sha1_compile below; confirm against the full file.            */
92 #define one_cycle(v,a,b,c,d,e,f,k,h) \
93 q(v,e) += rotr32(q(v,a),27) + \
94 f(q(v,b),q(v,c),q(v,d)) + k + h; \
95 q(v,b) = rotr32(q(v,b), 2)
/* five consecutive rounds with the working-variable roles rotated one */
/* position per round, so no copying between rounds is needed          */
97 #define five_cycle(v,f,k,i) \
98 one_cycle(v, 0,1,2,3,4, f,k,hf(i )); \
99 one_cycle(v, 4,0,1,2,3, f,k,hf(i+1)); \
100 one_cycle(v, 3,4,0,1,2, f,k,hf(i+2)); \
101 one_cycle(v, 2,3,4,0,1, f,k,hf(i+3)); \
102 one_cycle(v, 1,2,3,4,0, f,k,hf(i+4))
/* Hash one 64-byte block from ctx->wbuf into ctx->hash (the chaining     */
/* state). NOTE(review): this extract interleaves BOTH compile-time       */
/* variants of the working state — the v[5] array loaded with memcpy and  */
/* the five scalars v0..v4 — because the #if/#else/#endif lines selecting */
/* between them were dropped by extraction, along with the v4 load, the   */
/* closing brace, and other lines.                                        */
104 VOID_RETURN sha1_compile(sha1_ctx ctx[1])
105 { uint32_t *w = ctx->wbuf;
/* array variant: copy the five chaining values into v[] */
109 memcpy(v, ctx->hash, sizeof(ctx->hash));
/* register variant: load the chaining values into scalars */
111 uint32_t v0, v1, v2, v3, v4;
112 v0 = ctx->hash[0]; v1 = ctx->hash[1];
113 v2 = ctx->hash[2]; v3 = ctx->hash[3];
/* rounds 0..15 (K = 0x5a827999, ch): hf() for these rounds resolves to */
/* an earlier definition that is not visible in this extract            */
119 five_cycle(v, ch, 0x5a827999, 0);
120 five_cycle(v, ch, 0x5a827999, 5);
121 five_cycle(v, ch, 0x5a827999, 10);
122 one_cycle(v,0,1,2,3,4, ch, 0x5a827999, hf(15)); \
/* from round 16 on, hf(i) expands the message schedule in place:        */
/* w[i mod 16] = rotl32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1)          */
125 #define hf(i) (w[(i) & 15] = rotl32( \
126 w[((i) + 13) & 15] ^ w[((i) + 8) & 15] \
127 ^ w[((i) + 2) & 15] ^ w[(i) & 15], 1))
129 one_cycle(v,4,0,1,2,3, ch, 0x5a827999, hf(16));
130 one_cycle(v,3,4,0,1,2, ch, 0x5a827999, hf(17));
131 one_cycle(v,2,3,4,0,1, ch, 0x5a827999, hf(18));
132 one_cycle(v,1,2,3,4,0, ch, 0x5a827999, hf(19));
/* rounds 20..39: parity, K = 0x6ed9eba1 */
134 five_cycle(v, parity, 0x6ed9eba1, 20);
135 five_cycle(v, parity, 0x6ed9eba1, 25);
136 five_cycle(v, parity, 0x6ed9eba1, 30);
137 five_cycle(v, parity, 0x6ed9eba1, 35);
/* rounds 40..59: maj, K = 0x8f1bbcdc */
139 five_cycle(v, maj, 0x8f1bbcdc, 40);
140 five_cycle(v, maj, 0x8f1bbcdc, 45);
141 five_cycle(v, maj, 0x8f1bbcdc, 50);
142 five_cycle(v, maj, 0x8f1bbcdc, 55);
/* rounds 60..79: parity, K = 0xca62c1d6 */
144 five_cycle(v, parity, 0xca62c1d6, 60);
145 five_cycle(v, parity, 0xca62c1d6, 65);
146 five_cycle(v, parity, 0xca62c1d6, 70);
147 five_cycle(v, parity, 0xca62c1d6, 75);
/* fold the compressed block back into the chaining value (array variant) */
150 ctx->hash[0] += v[0]; ctx->hash[1] += v[1];
151 ctx->hash[2] += v[2]; ctx->hash[3] += v[3];
152 ctx->hash[4] += v[4];
/* register variant of the same update (the v4 line is not visible here) */
154 ctx->hash[0] += v0; ctx->hash[1] += v1;
155 ctx->hash[2] += v2; ctx->hash[3] += v3;
/* Initialise the context for a new hash: zero the counters and buffer,  */
/* then load the five standard SHA-1 initial chaining values.            */
/* NOTE(review): the function's brace lines are not visible here —       */
/* dropped by extraction.                                                */
160 VOID_RETURN sha1_begin(sha1_ctx ctx[1])
162 memset(ctx, 0, sizeof(sha1_ctx));
163 ctx->hash[0] = 0x67452301;
164 ctx->hash[1] = 0xefcdab89;
165 ctx->hash[2] = 0x98badcfe;
166 ctx->hash[3] = 0x10325476;
167 ctx->hash[4] = 0xc3d2e1f0;
170 /* SHA1 hash data in an array of bytes into hash buffer and */
171 /* call the hash_compile function as required. For both the */
172 /* bit and byte orientated versions, the block length 'len' */
173 /* must not be greater than 2^32 - 1 bits (2^29 - 1 bytes) */
/* Add input to the hash state, compiling full 64-byte blocks as they    */
/* complete. The paths visible below treat ctx->count[0] and len as BIT  */
/* counts (the use of SHA1_BITS at the end suggests this is the          */
/* bit-oriented build — confirm against the full file). NOTE(review):    */
/* extraction has dropped several lines here, including the count[1]     */
/* carry statement, various braces, and a sha1_compile call in the       */
/* byte-aligned loop.                                                    */
175 VOID_RETURN sha1_hash(const unsigned char data[], unsigned long len, sha1_ctx ctx[1])
176 { uint32_t pos = (uint32_t)((ctx->count[0] >> 3) & SHA1_MASK);
177 const unsigned char *sp = data;
178 unsigned char *w = (unsigned char*)ctx->wbuf;
/* bit offset within the current partially filled byte */
180 uint32_t ofs = (ctx->count[0] & 7);
/* bump the low count; a wrap here means the 32-bit counter overflowed */
/* (the carry into count[1] is on a line not visible in this extract)  */
184 if((ctx->count[0] += len) < len)
187 if(ofs) /* if not on a byte boundary */
189 if(ofs + len < 8) /* if no added bytes are needed */
/* the new bits fit within the current byte */
191 w[pos] |= (*sp >> ofs);
193 else /* otherwise and add bytes */
194 { unsigned char part = w[pos];
/* shift each input byte across the 'ofs'-bit boundary */
196 while((int)(ofs + (len -= 8)) >= 0)
198 w[pos++] = part | (*sp >> ofs);
199 part = *sp++ << (8 - ofs);
200 if(pos == SHA1_BLOCK_SIZE)
/* block full: swap to internal word order and compress */
202 bsw_32(w, SHA1_BLOCK_SIZE >> 2);
203 sha1_compile(ctx); pos = 0;
210 else /* data is byte aligned */
212 { uint32_t space = SHA1_BLOCK_SIZE - pos;
/* consume whole blocks directly from the caller's buffer */
214 while(len >= (space << 3))
216 memcpy(w + pos, sp, space);
217 bsw_32(w, SHA1_BLOCK_SIZE >> 2);
219 sp += space; len -= (space << 3);
220 space = SHA1_BLOCK_SIZE; pos = 0;
/* stash the remaining partial block (bit count rounded up to bytes) */
222 memcpy(w + pos, sp, (len + 7 * SHA1_BITS) >> 3);
226 /* SHA1 final padding and digest calculation */
/* Finish the hash: mask and pad the final block, append the length,     */
/* run the final compression and write the digest bytes to hval[].       */
/* NOTE(review): extraction has dropped lines here — the bodies of the   */
/* short-buffer if/while below, a final bsw_32/sha1_compile sequence,    */
/* and the closing brace.                                                */
228 VOID_RETURN sha1_end(unsigned char hval[], sha1_ctx ctx[1])
229 { uint32_t i = (uint32_t)((ctx->count[0] >> 3) & SHA1_MASK), m1;
231 /* put bytes in the buffer in an order in which references to */
232 /* 32-bit words will put bytes with lower addresses into the */
233 /* top of 32 bit words on BOTH big and little endian machines */
234 bsw_32(ctx->wbuf, (i + 3 + SHA1_BITS) >> 2);
236 /* we now need to mask valid bytes and add the padding which is */
237 /* a single 1 bit and as many zero bits as necessary. Note that */
238 /* we can always add the first padding byte here because the */
239 /* buffer always has at least one empty slot */
/* m1 is the 0x80 padding bit shifted to the first unused bit position */
240 m1 = (unsigned char)0x80 >> (ctx->count[0] & 7);
/* keep only the valid bits of the last partial word, then OR in m1 */
241 ctx->wbuf[i >> 2] &= ((0xffffff00 | (~m1 + 1)) << 8 * (~i & 3));
242 ctx->wbuf[i >> 2] |= (m1 << 8 * (~i & 3));
244 /* we need 9 or more empty positions, one for the padding byte */
245 /* (above) and eight for the length count. If there is not */
246 /* enough space, pad and empty the buffer */
247 if(i > SHA1_BLOCK_SIZE - 9)
249 if(i < 60) ctx->wbuf[15] = 0;
253 else /* compute a word index for the empty buffer positions */
256 while(i < 14) /* and zero pad all but last two positions */
259 /* the following 32-bit length fields are assembled in the */
260 /* wrong byte order on little endian machines but this is */
261 /* corrected later since they are only ever used as 32-bit */
/* high word then low word of the 64-bit message bit count */
263 ctx->wbuf[14] = ctx->count[1];
264 ctx->wbuf[15] = ctx->count[0];
267 /* extract the hash value as bytes in case the hash buffer is */
268 /* misaligned for 32-bit words */
269 for(i = 0; i < SHA1_DIGEST_SIZE; ++i)
270 hval[i] = ((ctx->hash[i >> 2] >> (8 * (~i & 3))) & 0xff);
/* One-call convenience wrapper: initialise a local context, hash 'len'  */
/* units of data[], and write the digest to hval[].                      */
/* NOTE(review): the local context declaration (cx) and the function's   */
/* brace lines are not visible in this extract.                          */
273 VOID_RETURN sha1(unsigned char hval[], const unsigned char data[], unsigned long len)
276 sha1_begin(cx); sha1_hash(data, len, cx); sha1_end(hval, cx);
281 #if defined(__cplusplus)