/*
 * Copyright (C) 2006-2009 Vincent Hanquez
 *               2016 Herbert Valerio Riedel
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "sha1.h"

/* The original #include targets were lost in extraction; these are the
 * standard headers this file's code requires (static_assert, offsetof,
 * fixed-width integer types, and memcpy/memset respectively). */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#if defined(static_assert)
static_assert(offsetof(struct sha1_ctx, h[5]) == SHA1_CTX_SIZE,
              "unexpected sha1_ctx size");
#else
/* poor man's pre-C11 _Static_assert */
typedef char static_assertion__unexpected_sha1_ctx_size[(offsetof(struct sha1_ctx, h[5]) == SHA1_CTX_SIZE) ? 1 : -1];
#endif

#define ptr_uint32_aligned(ptr) (!((uintptr_t)(ptr) & 0x3))

static inline uint32_t
rol32(const uint32_t word, const unsigned shift)
{
    /* GCC usually transforms this into a 'rol'-insn */
    return (word << shift) | (word >> (32 - shift));
}

static inline uint32_t
cpu_to_be32(const uint32_t hl)
{
#if WORDS_BIGENDIAN
    return hl;
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
    return __builtin_bswap32(hl);
#else
    /* GCC usually transforms this into a bswap insn */
    return ((hl & 0xff000000) >> 24) |
           ((hl & 0x00ff0000) >>  8) |
           ((hl & 0x0000ff00) <<  8) |
           ( hl               << 24);
#endif
}

static inline void
cpu_to_be32_array(uint32_t *dest, const uint32_t *src, unsigned wordcnt)
{
    while (wordcnt--)
        *dest++ = cpu_to_be32(*src++);
}

static inline uint64_t
cpu_to_be64(const uint64_t hll)
{
#if WORDS_BIGENDIAN
    return hll;
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
    return __builtin_bswap64(hll);
#else
    return ((uint64_t)cpu_to_be32(hll & 0xffffffff) << 32LL) |
           cpu_to_be32(hll >> 32);
#endif
}

void
hs_cryptohash_sha1_init(struct sha1_ctx *ctx)
{
    memset(ctx, 0, SHA1_CTX_SIZE);

    ctx->h[0] = 0x67452301;
    ctx->h[1] = 0xefcdab89;
    ctx->h[2] = 0x98badcfe;
    ctx->h[3] = 0x10325476;
    ctx->h[4] = 0xc3d2e1f0;
}

#define f1(x, y, z) (z ^ (x & (y ^ z)))
#define f2(x, y, z) (x ^ y ^ z)
#define f3(x, y, z) ((x & y) + (z & (x ^ y)))
#define f4(x, y, z) f2(x, y, z)

#define K1 0x5a827999
#define K2 0x6ed9eba1
#define K3 0x8f1bbcdc
#define K4 0xca62c1d6

#define R(a, b, c, d, e, f, k, w) \
    e += rol32(a, 5) + f(b, c, d) + k + w; b = rol32(b, 30)

#define M(i) (w[i & 0x0f] = rol32(w[i & 0x0f] ^ w[(i - 14) & 0x0f] \
              ^ w[(i - 8) & 0x0f] ^ w[(i - 3) & 0x0f], 1))

static void
sha1_do_chunk_aligned(struct sha1_ctx *ctx, uint32_t w[])
{
    uint32_t a = ctx->h[0];
    uint32_t b = ctx->h[1];
    uint32_t c = ctx->h[2];
    uint32_t d = ctx->h[3];
    uint32_t e = ctx->h[4];

    /* rounds 0..19 (f1/K1): the first 16 use the message words directly,
     * the rest expand the schedule in place via M() */
    R(a, b, c, d, e, f1, K1, w[0]);
    R(e, a, b, c, d, f1, K1, w[1]);
    R(d, e, a, b, c, f1, K1, w[2]);
    R(c, d, e, a, b, f1, K1, w[3]);
    R(b, c, d, e, a, f1, K1, w[4]);
    R(a, b, c, d, e, f1, K1, w[5]);
    R(e, a, b, c, d, f1, K1, w[6]);
    R(d, e, a, b, c, f1, K1, w[7]);
    R(c, d, e, a, b, f1, K1, w[8]);
    R(b, c, d, e, a, f1, K1, w[9]);
    R(a, b, c, d, e, f1, K1, w[10]);
    R(e, a, b, c, d, f1, K1, w[11]);
    R(d, e, a, b, c, f1, K1, w[12]);
    R(c, d, e, a, b, f1, K1, w[13]);
    R(b, c, d, e, a, f1, K1, w[14]);
    R(a, b, c, d, e, f1, K1, w[15]);
    R(e, a, b, c, d, f1, K1, M(16));
    R(d, e, a, b, c, f1, K1, M(17));
    R(c, d, e, a, b, f1, K1, M(18));
    R(b, c, d, e, a, f1, K1, M(19));

    /* rounds 20..39 (f2/K2) */
    R(a, b, c, d, e, f2, K2, M(20));
    R(e, a, b, c, d, f2, K2, M(21));
    R(d, e, a, b, c, f2, K2, M(22));
    R(c, d, e, a, b, f2, K2, M(23));
    R(b, c, d, e, a, f2, K2, M(24));
    R(a, b, c, d, e, f2, K2, M(25));
    R(e, a, b, c, d, f2, K2, M(26));
    R(d, e, a, b, c, f2, K2, M(27));
    R(c, d, e, a, b, f2, K2, M(28));
    R(b, c, d, e, a, f2, K2, M(29));
    R(a, b, c, d, e, f2, K2, M(30));
    R(e, a, b, c, d, f2, K2, M(31));
    R(d, e, a, b, c, f2, K2, M(32));
    R(c, d, e, a, b, f2, K2, M(33));
    R(b, c, d, e, a, f2, K2, M(34));
    R(a, b, c, d, e, f2, K2, M(35));
    R(e, a, b, c, d, f2, K2, M(36));
    R(d, e, a, b, c, f2, K2, M(37));
    R(c, d, e, a, b, f2, K2, M(38));
    R(b, c, d, e, a, f2, K2, M(39));

    /* rounds 40..59 (f3/K3) */
    R(a, b, c, d, e, f3, K3, M(40));
    R(e, a, b, c, d, f3, K3, M(41));
    R(d, e, a, b, c, f3, K3, M(42));
    R(c, d, e, a, b, f3, K3, M(43));
    R(b, c, d, e, a, f3, K3, M(44));
    R(a, b, c, d, e, f3, K3, M(45));
    R(e, a, b, c, d, f3, K3, M(46));
    R(d, e, a, b, c, f3, K3, M(47));
    R(c, d, e, a, b, f3, K3, M(48));
    R(b, c, d, e, a, f3, K3, M(49));
    R(a, b, c, d, e, f3, K3, M(50));
    R(e, a, b, c, d, f3, K3, M(51));
    R(d, e, a, b, c, f3, K3, M(52));
    R(c, d, e, a, b, f3, K3, M(53));
    R(b, c, d, e, a, f3, K3, M(54));
    R(a, b, c, d, e, f3, K3, M(55));
    R(e, a, b, c, d, f3, K3, M(56));
    R(d, e, a, b, c, f3, K3, M(57));
    R(c, d, e, a, b, f3, K3, M(58));
    R(b, c, d, e, a, f3, K3, M(59));

    /* rounds 60..79 (f4/K4) */
    R(a, b, c, d, e, f4, K4, M(60));
    R(e, a, b, c, d, f4, K4, M(61));
    R(d, e, a, b, c, f4, K4, M(62));
    R(c, d, e, a, b, f4, K4, M(63));
    R(b, c, d, e, a, f4, K4, M(64));
    R(a, b, c, d, e, f4, K4, M(65));
    R(e, a, b, c, d, f4, K4, M(66));
    R(d, e, a, b, c, f4, K4, M(67));
    R(c, d, e, a, b, f4, K4, M(68));
    R(b, c, d, e, a, f4, K4, M(69));
    R(a, b, c, d, e, f4, K4, M(70));
    R(e, a, b, c, d, f4, K4, M(71));
    R(d, e, a, b, c, f4, K4, M(72));
    R(c, d, e, a, b, f4, K4, M(73));
    R(b, c, d, e, a, f4, K4, M(74));
    R(a, b, c, d, e, f4, K4, M(75));
    R(e, a, b, c, d, f4, K4, M(76));
    R(d, e, a, b, c, f4, K4, M(77));
    R(c, d, e, a, b, f4, K4, M(78));
    R(b, c, d, e, a, f4, K4, M(79));

    ctx->h[0] += a;
    ctx->h[1] += b;
    ctx->h[2] += c;
    ctx->h[3] += d;
    ctx->h[4] += e;
}
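/*
 * Editorial note (not part of the original source): the 80 unrolled
 * rounds above are equivalent to the rolled reference loop sketched
 * below, which uses a full 80-word schedule instead of the 16-word
 * circular buffer driven by M(). It is guarded out so it is never
 * compiled; the function name sha1_do_chunk_ref is hypothetical.
 */
#if 0
static void
sha1_do_chunk_ref(struct sha1_ctx *ctx, const uint32_t in[16])
{
    uint32_t w[80];
    uint32_t h[5];
    unsigned i;

    /* message schedule: w[i] = rol32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1);
     * cf. the M() macro, which keeps only the most recent 16 words */
    memcpy(w, in, 64);
    for (i = 16; i < 80; i++)
        w[i] = rol32(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);

    memcpy(h, ctx->h, sizeof h);

    /* one iteration corresponds to one R() invocation above; the explicit
     * rotation of h[0..4] replaces R()'s argument shuffling */
    for (i = 0; i < 80; i++) {
        uint32_t f, k, tmp;

        if      (i < 20) { f = f1(h[1], h[2], h[3]); k = K1; }
        else if (i < 40) { f = f2(h[1], h[2], h[3]); k = K2; }
        else if (i < 60) { f = f3(h[1], h[2], h[3]); k = K3; }
        else             { f = f4(h[1], h[2], h[3]); k = K4; }

        tmp  = rol32(h[0], 5) + f + h[4] + k + w[i];
        h[4] = h[3];
        h[3] = h[2];
        h[2] = rol32(h[1], 30);
        h[1] = h[0];
        h[0] = tmp;
    }

    ctx->h[0] += h[0];
    ctx->h[1] += h[1];
    ctx->h[2] += h[2];
    ctx->h[3] += h[3];
    ctx->h[4] += h[4];
}
#endif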
static void
sha1_do_chunk(struct sha1_ctx *ctx, const uint8_t buf[])
{
    uint32_t w[16];

    if (ptr_uint32_aligned(buf)) {
        /* aligned buf */
        cpu_to_be32_array(w, (const uint32_t *)buf, 16);
    } else {
        /* unaligned buf */
        memcpy(w, buf, 64);
#if !WORDS_BIGENDIAN
        cpu_to_be32_array(w, w, 16);
#endif
    }

    sha1_do_chunk_aligned(ctx, w);
}

void
hs_cryptohash_sha1_update(struct sha1_ctx *ctx, const uint8_t *data, size_t len)
{
    size_t index = ctx->sz & 0x3f;
    const size_t to_fill = 64 - index;

    ctx->sz += len;

    /* process partial buffer if there's enough data to make a block */
    if (index && len >= to_fill) {
        memcpy(ctx->buf + index, data, to_fill);
        sha1_do_chunk(ctx, ctx->buf);
        /* memset(ctx->buf, 0, 64); */
        len -= to_fill;
        data += to_fill;
        index = 0;
    }

    /* process as many 64-byte blocks as possible */
    while (len >= 64) {
        sha1_do_chunk(ctx, data);
        len -= 64;
        data += 64;
    }

    /* append remaining data into buf */
    if (len)
        memcpy(ctx->buf + index, data, len);
}
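/*
 * Editorial note (not part of the original source): a worked example of
 * the buffering arithmetic in hs_cryptohash_sha1_update. Starting from a
 * fresh context, an update of 70 bytes hashes one 64-byte block straight
 * from `data` and buffers the remaining 6 bytes (ctx->sz = 70, so
 * index = 70 & 0x3f = 6 on the next call). A following update of 100
 * bytes first completes the pending block with to_fill = 58 bytes, then
 * buffers the remaining 42 bytes; ctx->sz = 170 and 170 & 0x3f = 42, as
 * expected. Splitting input across updates therefore yields the same
 * digest as a single one-shot update.
 */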
void
hs_cryptohash_sha1_finalize(struct sha1_ctx *ctx, uint8_t *out)
{
    static uint8_t padding[64] = { 0x80, };

    /* capture the message length in bits before the padding updates
     * below mutate ctx->sz */
    uint64_t bits = cpu_to_be64(ctx->sz << 3);

    /* pad out to 56 bytes mod 64 */
    const size_t index = ctx->sz & 0x3f;
    const size_t padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
    hs_cryptohash_sha1_update(ctx, padding, padlen);

    /* append the 64-bit big-endian message length */
    hs_cryptohash_sha1_update(ctx, (uint8_t *)&bits, sizeof(bits));

    /* output hash */
    cpu_to_be32_array((uint32_t *)out, ctx->h, 5);
}
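/*
 * Editorial note (not part of the original source): a minimal self-test
 * sketch against the well-known FIPS 180-1 "abc" test vector
 * (a9993e364706816aba3e25717850c26c9cd0d89d). The SHA1_SELFTEST guard is
 * hypothetical; compile this file with -DSHA1_SELFTEST to try it.
 */
#ifdef SHA1_SELFTEST
#include <stdio.h>

int
main(void)
{
    static const uint8_t expected[20] = {
        0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
        0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d
    };
    struct sha1_ctx ctx;
    uint8_t digest[20]; /* 5 words x 4 bytes, as written by finalize */

    hs_cryptohash_sha1_init(&ctx);
    hs_cryptohash_sha1_update(&ctx, (const uint8_t *)"abc", 3);
    hs_cryptohash_sha1_finalize(&ctx, digest);

    if (memcmp(digest, expected, sizeof expected) == 0) {
        puts("sha1 self-test: ok");
        return 0;
    }
    puts("sha1 self-test: FAILED");
    return 1;
}
#endif /* SHA1_SELFTEST */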