/*	$NetBSD: sha1.c,v 1.1 2005/12/20 20:29:40 christos Exp $	*/
/*	$OpenBSD: sha1.c,v 1.9 1997/07/23 21:12:32 kstailey Exp $	*/

/*
 * SHA-1 in C
 * By Steve Reid <steve@edmweb.com>
 * 100% Public Domain
 *
 * Test Vectors (from FIPS PUB 180-1)
 * "abc"
 *   A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
 * "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
 *   84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
 * A million repetitions of "a"
 *   34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
 */
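/*
 * Illustrative usage sketch (not compiled; guarded out with #if 0):
 * checking the first test vector above through the public
 * SHA1Init()/SHA1Update()/SHA1Final() interface.  It assumes the usual
 * sha1.h header declaring SHA1_CTX and these three functions.
 */
#if 0
#include <stdio.h>
#include "sha1.h"

int
main(void)
{
	SHA1_CTX ctx;
	unsigned char digest[20];
	int i;

	SHA1Init(&ctx);
	SHA1Update(&ctx, (const unsigned char *)"abc", 3);
	SHA1Final(digest, &ctx);
	for (i = 0; i < 20; i++)
		printf("%02X", digest[i]);	/* expect A9993E36 4706816A ... */
	printf("\n");
	return 0;
}
#endif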
#define SHA1HANDSOFF		/* Copies data before messing with it. */

#include <sys/cdefs.h>

#include <sys/types.h>

#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <assert.h>
#include <string.h>
#include <sha1.h>		/* SHA1_CTX and the SHA1*() prototypes */
/* 32-bit rotate left. */
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
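/*
 * Example (illustrative): rol(0x80000001, 1) == 0x00000003 -- the high
 * bit wraps around into bit 0.  The macro is only well defined for
 * 1 <= bits <= 31 on a 32-bit operand.
 */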
/*
 * blk0() and blk() perform the initial expand.
 * I got the idea of expanding during the round function from SSLeay
 */
#if BYTE_ORDER == LITTLE_ENDIAN
# define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
    |(rol(block->l[i],8)&0x00FF00FF))
#else
# define blk0(i) block->l[i]
#endif
#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
    ^block->l[(i+2)&15]^block->l[i&15],1))
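/*
 * Illustrative sketch (not compiled): the same message-schedule
 * expansion written out with a full 80-word array, as in FIPS 180-1.
 * blk() above computes W[t] in place in a circular 16-word buffer,
 * which is why every index is masked with &15; the taps (i+13, i+8,
 * i+2, i) mod 16 correspond to W[t-3], W[t-8], W[t-14] and W[t-16].
 */
#if 0
static void
sha1_schedule_sketch(const u_int32_t m[16], u_int32_t w[80])
{
	int t;

	for (t = 0; t < 16; t++)
		w[t] = m[t];	/* big-endian words of the 512-bit block */
	for (t = 16; t < 80; t++)
		w[t] = rol(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1);
}
#endif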
/*
 * (R0+R1), R2, R3, R4 are the different operations (rounds) used in SHA1
 */
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
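/*
 * Illustrative sketch (not compiled): one compression step per loop
 * iteration, equivalent to the unrolled R0..R4 sequences used below.
 * Assumes w[] already holds the 80-word schedule (see the expansion
 * sketch above); R0/blk0() instead fold the byte swap and expansion
 * into the first 16 steps.
 */
#if 0
static void
sha1_rounds_sketch(u_int32_t st[5], const u_int32_t w[80])
{
	u_int32_t a = st[0], b = st[1], c = st[2], d = st[3], e = st[4];
	u_int32_t f, k, t;
	int i;

	for (i = 0; i < 80; i++) {
		if (i < 20) {
			f = (b & (c ^ d)) ^ d;		/* Ch */
			k = 0x5A827999;
		} else if (i < 40) {
			f = b ^ c ^ d;			/* Parity */
			k = 0x6ED9EBA1;
		} else if (i < 60) {
			f = ((b | c) & d) | (b & c);	/* Maj */
			k = 0x8F1BBCDC;
		} else {
			f = b ^ c ^ d;			/* Parity */
			k = 0xCA62C1D6;
		}
		t = rol(a, 5) + f + e + k + w[i];
		e = d; d = c; c = rol(b, 30); b = a; a = t;
	}
	st[0] += a; st[1] += b; st[2] += c; st[3] += d; st[4] += e;
}
#endif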
typedef union {
	u_char c[64];		/* the 512-bit block as bytes ... */
	u_int32_t l[16];	/* ... or as 16 32-bit words */
} CHAR64LONG16;

/* old sparc64 gcc could not compile this */
#undef SPARC64_GCC_WORKAROUND
#if defined(__sparc64__) && defined(__GNUC__) && __GNUC__ < 3
#define SPARC64_GCC_WORKAROUND
#endif

#ifdef SPARC64_GCC_WORKAROUND
void do_R01(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
void do_R2(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
void do_R3(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
void do_R4(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);

#define nR0(v,w,x,y,z,i) R0(*v,*w,*x,*y,*z,i)
#define nR1(v,w,x,y,z,i) R1(*v,*w,*x,*y,*z,i)
#define nR2(v,w,x,y,z,i) R2(*v,*w,*x,*y,*z,i)
#define nR3(v,w,x,y,z,i) R3(*v,*w,*x,*y,*z,i)
#define nR4(v,w,x,y,z,i) R4(*v,*w,*x,*y,*z,i)
void
do_R01(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
{
	nR0(a,b,c,d,e, 0); nR0(e,a,b,c,d, 1); nR0(d,e,a,b,c, 2); nR0(c,d,e,a,b, 3);
	nR0(b,c,d,e,a, 4); nR0(a,b,c,d,e, 5); nR0(e,a,b,c,d, 6); nR0(d,e,a,b,c, 7);
	nR0(c,d,e,a,b, 8); nR0(b,c,d,e,a, 9); nR0(a,b,c,d,e,10); nR0(e,a,b,c,d,11);
	nR0(d,e,a,b,c,12); nR0(c,d,e,a,b,13); nR0(b,c,d,e,a,14); nR0(a,b,c,d,e,15);
	nR1(e,a,b,c,d,16); nR1(d,e,a,b,c,17); nR1(c,d,e,a,b,18); nR1(b,c,d,e,a,19);
}
void
do_R2(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
{
	nR2(a,b,c,d,e,20); nR2(e,a,b,c,d,21); nR2(d,e,a,b,c,22); nR2(c,d,e,a,b,23);
	nR2(b,c,d,e,a,24); nR2(a,b,c,d,e,25); nR2(e,a,b,c,d,26); nR2(d,e,a,b,c,27);
	nR2(c,d,e,a,b,28); nR2(b,c,d,e,a,29); nR2(a,b,c,d,e,30); nR2(e,a,b,c,d,31);
	nR2(d,e,a,b,c,32); nR2(c,d,e,a,b,33); nR2(b,c,d,e,a,34); nR2(a,b,c,d,e,35);
	nR2(e,a,b,c,d,36); nR2(d,e,a,b,c,37); nR2(c,d,e,a,b,38); nR2(b,c,d,e,a,39);
}
void
do_R3(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
{
	nR3(a,b,c,d,e,40); nR3(e,a,b,c,d,41); nR3(d,e,a,b,c,42); nR3(c,d,e,a,b,43);
	nR3(b,c,d,e,a,44); nR3(a,b,c,d,e,45); nR3(e,a,b,c,d,46); nR3(d,e,a,b,c,47);
	nR3(c,d,e,a,b,48); nR3(b,c,d,e,a,49); nR3(a,b,c,d,e,50); nR3(e,a,b,c,d,51);
	nR3(d,e,a,b,c,52); nR3(c,d,e,a,b,53); nR3(b,c,d,e,a,54); nR3(a,b,c,d,e,55);
	nR3(e,a,b,c,d,56); nR3(d,e,a,b,c,57); nR3(c,d,e,a,b,58); nR3(b,c,d,e,a,59);
}
void
do_R4(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
{
	nR4(a,b,c,d,e,60); nR4(e,a,b,c,d,61); nR4(d,e,a,b,c,62); nR4(c,d,e,a,b,63);
	nR4(b,c,d,e,a,64); nR4(a,b,c,d,e,65); nR4(e,a,b,c,d,66); nR4(d,e,a,b,c,67);
	nR4(c,d,e,a,b,68); nR4(b,c,d,e,a,69); nR4(a,b,c,d,e,70); nR4(e,a,b,c,d,71);
	nR4(d,e,a,b,c,72); nR4(c,d,e,a,b,73); nR4(b,c,d,e,a,74); nR4(a,b,c,d,e,75);
	nR4(e,a,b,c,d,76); nR4(d,e,a,b,c,77); nR4(c,d,e,a,b,78); nR4(b,c,d,e,a,79);
}
#endif
/*
 * Hash a single 512-bit block. This is the core of the algorithm.
 */
void SHA1Transform(state, buffer)
	u_int32_t state[5];
	const u_char buffer[64];
{
	u_int32_t a, b, c, d, e;
	CHAR64LONG16 *block;

#ifdef SHA1HANDSOFF
	CHAR64LONG16 workspace;

	block = &workspace;
	(void)memcpy(block, buffer, 64);
#else
	block = (CHAR64LONG16 *)(void *)buffer;
#endif
	/* Copy context->state[] to working vars */
	a = state[0];
	b = state[1];
	c = state[2];
	d = state[3];
	e = state[4];
#ifdef SPARC64_GCC_WORKAROUND
	do_R01(&a, &b, &c, &d, &e, block);
	do_R2(&a, &b, &c, &d, &e, block);
	do_R3(&a, &b, &c, &d, &e, block);
	do_R4(&a, &b, &c, &d, &e, block);
#else
	/* 4 rounds of 20 operations each. Loop unrolled. */
	R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
	R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
	R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
	R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
	R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
	R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
	R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
	R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
	R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
	R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
	R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
	R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
	R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
	R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
	R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
	R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
	R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
	R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
	R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
	R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
#endif

	/* Add the working vars back into context.state[] */
	state[0] += a;
	state[1] += b;
	state[2] += c;
	state[3] += d;
	state[4] += e;

	/* Wipe variables */
	a = b = c = d = e = 0;
}
/*
 * SHA1Init - Initialize new context
 */
void SHA1Init(context)
	SHA1_CTX *context;
{

	assert(context != 0);
	/* SHA1 initialization constants */
	context->state[0] = 0x67452301;
	context->state[1] = 0xEFCDAB89;
	context->state[2] = 0x98BADCFE;
	context->state[3] = 0x10325476;
	context->state[4] = 0xC3D2E1F0;
	context->count[0] = context->count[1] = 0;
}
/*
 * Run your data through this.
 */
void SHA1Update(context, data, len)
	SHA1_CTX *context;
	const u_char *data;
	u_int len;
{
	u_int i, j;

	assert(context != 0);
	assert(data != 0);

	j = context->count[0];
	/* Maintain the 64-bit bit count in count[1]:count[0]. */
	if ((context->count[0] += len << 3) < j)
		context->count[1] += (len>>29)+1;
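	/*
	 * Worked example (illustrative): if count[0] were 0xFFFFFFF8 and
	 * len == 2, then count[0] += 16 wraps to 0x00000008, the comparison
	 * against the saved value j detects the wrap, and count[1] gains
	 * (2 >> 29) + 1 == 1, keeping the 64-bit bit count exact.
	 */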
	/* j is now the byte offset into the 64-byte buffer. */
	j = (j >> 3) & 63;
	if ((j + len) > 63) {
		(void)memcpy(&context->buffer[j], data, (i = 64-j));
		SHA1Transform(context->state, context->buffer);
		for ( ; i + 63 < len; i += 64)
			SHA1Transform(context->state, &data[i]);
		j = 0;
	} else {
		i = 0;
	}
	(void)memcpy(&context->buffer[j], &data[i], len - i);
}
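/*
 * Note (illustrative): SHA1Update() may be called any number of times;
 * for example, hashing 80 bytes as two 40-byte calls produces the same
 * digest as a single 80-byte call, because partial blocks are carried
 * over in context->buffer until a full 64 bytes are available.
 */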
/*
 * Add padding and return the message digest.
 */
void SHA1Final(digest, context)
	u_char digest[20];
	SHA1_CTX *context;
{
	u_int i;
	u_char finalcount[8];
	assert(context != 0);
	for (i = 0; i < 8; i++) {
		finalcount[i] = (u_char)((context->count[(i >= 4 ? 0 : 1)]
		    >> ((3-(i & 3)) * 8) ) & 255);	/* Endian independent */
	}
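	/*
	 * Illustrative example: for a 3-byte ("abc") message the bit count
	 * is 24, so finalcount becomes 00 00 00 00 00 00 00 18 -- the
	 * 64-bit length in big-endian byte order, count[1] (high word)
	 * first.
	 */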
	SHA1Update(context, (const u_char *)"\200", 1);
	while ((context->count[0] & 504) != 448)
		SHA1Update(context, (const u_char *)"\0", 1);
	SHA1Update(context, finalcount, 8);  /* Should cause a SHA1Transform() */
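	/*
	 * Padding walk-through (illustrative), again for "abc": after the
	 * 0x80 byte the bit count is 32; 52 zero bytes bring it to 448
	 * (mod 512); appending the 8 length bytes completes a single
	 * 512-bit block, which the final SHA1Update() above transforms.
	 */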
	for (i = 0; i < 20; i++)
		digest[i] = (u_char)
		    ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
}

#endif /* HAVE_SHA1_H */