/*
 * uti_hash -- hash a byte string (MurmurHash2 / MurmurHashAligned2).
 * Credits to Austin Appleby; see https://github.com/aappleby/smhasher
 * for the reference implementation.
 *
 * From hg changeset afa2b5ffe44c (Sebastien Decugis, 2009-06-24):
 * "Allow unaligned strings in uti_hash (as received inside RADIUS attributes)"
 *
 * The input is NOT always 4-byte aligned (ex: strings received inside RADIUS
 * attributes).  When the pointer is unaligned we use the slower
 * MurmurHashAligned2 variant, which only reads memory at aligned addresses and
 * stitches each 32-bit word together from two neighboring aligned loads.
 * Both paths produce the same hash for the same bytes, so hashes of aligned
 * and unaligned copies of a string always agree.
 */
#define _HASH_MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }

uint32_t uti_hash ( char * string, size_t len )
{
	/* Work on unsigned bytes: with a plain (possibly signed) char, the
	 * "<< 16" / "<< 8" shifts below would sign-extend bytes >= 0x80 and
	 * corrupt the hash. */
	const unsigned char * data = (const unsigned char *) string;

	/* NOTE(review): the initializer is reconstructed from the canonical
	 * MurmurHash2 reference (h = seed ^ len, seed 0); the declaration is
	 * outside the visible hunks -- confirm against the full utils.c. */
	uint32_t hash = (uint32_t) len;

	const uint32_t m = 0x5bd1e995;
	const int r = 24;

	/* uintptr_t, not long: long is 32-bit on LLP64 platforms (Win64) and
	 * would truncate the pointer before the mask. */
	int align = (int)((uintptr_t) string & 3);

	if (!align || (len < 4)) {

		/* Data is aligned (or too short for word loads): plain MurmurHash2 */
		while (len >= 4)
		{
			/* Mix 4 bytes at a time into the hash.  memcpy is the
			 * aliasing-safe equivalent of *(uint32_t *)data; we do
			 * not care about the byte order. */
			uint32_t k;
			memcpy(&k, data, 4);

			_HASH_MIX(hash, k, m);

			data += 4;
			len  -= 4;
		}

		/* Handle the last few bytes of the input */
		switch (len) {
			case 3: hash ^= data[2] << 16; /* fallthrough */
			case 2: hash ^= data[1] << 8;  /* fallthrough */
			case 1: hash ^= data[0];
				hash *= m;
		}

	} else {
		/* Unaligned data: MurmurHashAligned2, aligned reads only */

		/* Pre-load the temp register with the 4-align bytes that sit
		 * before the first aligned boundary (cascading fallthrough) */
		uint32_t t = 0, d = 0;
		switch (align)
		{
			case 1: t |= data[2] << 16; /* fallthrough */
			case 2: t |= data[1] << 8;  /* fallthrough */
			case 3: t |= data[0];
		}
		t <<= (8 * align);

		data += 4 - align;
		len  -= 4 - align;

		/* From this point, "data" can be read by chunks of 4 bytes */

		int sl = 8 * (4 - align);	/* left-shift count, 8..24 */
		int sr = 8 * align;		/* right-shift count, 8..24 */

		/* Mix: each word is stitched from the previously loaded word
		 * (t) and the current aligned load (d) */
		while (len >= 4)
		{
			uint32_t k;

			memcpy(&d, data, 4);	/* aligned load, aliasing-safe */
			k = (t >> sr) | (d << sl);

			_HASH_MIX(hash, k, m);

			t = d;

			data += 4;
			len  -= 4;
		}

		/* Handle leftover data in temp registers */
		d = 0;
		if (len >= align)
		{
			uint32_t k;

			switch (align)
			{
				case 3: d |= data[2] << 16; /* fallthrough */
				case 2: d |= data[1] << 8;  /* fallthrough */
				case 1: d |= data[0];
			}

			k = (t >> sr) | (d << sl);
			_HASH_MIX(hash, k, m);

			data += align;
			len  -= align;

			/* Handle tail bytes */
			switch (len)
			{
				case 3: hash ^= data[2] << 16; /* fallthrough */
				case 2: hash ^= data[1] << 8;  /* fallthrough */
				case 1: hash ^= data[0];
					hash *= m;
			}
		}
		else
		{
			/* Fewer than "align" bytes left: fold them, plus the
			 * unconsumed bytes still held in t, in one step */
			switch (len)
			{
				case 3: d |= data[2] << 16; /* fallthrough */
				case 2: d |= data[1] << 8;  /* fallthrough */
				case 1: d |= data[0];       /* fallthrough */
				case 0: hash ^= (t >> sr) | (d << sl);
					hash *= m;
			}
		}
	}

	/* Do a few final mixes of the hash to ensure the last few bytes are
	 * well-incorporated.
	 * NOTE(review): this avalanche is reconstructed from the canonical
	 * MurmurHash2 reference; the original lines are outside the visible
	 * hunks -- confirm against the full utils.c. */
	hash ^= hash >> 13;
	hash *= m;
	hash ^= hash >> 15;

	return hash;
}