diff --git a/src/bloom.cpp b/src/bloom.cpp
index f07b5b60664..f8e28edded5 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -245,6 +245,14 @@ static inline uint32_t RollingBloomHash(unsigned int nHashNum, uint32_t nTweak,
     return MurmurHash3(nHashNum * 0xFBA4C795 + nTweak, vDataToHash);
 }
 
+
+// A replacement for x % n. This assumes that x and n are 32bit integers, and x is a uniformly random distributed 32bit value
+// which should be the case for a good hash.
+// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+static inline uint32_t FastMod(uint32_t x, size_t n) {
+    return ((uint64_t)x * (uint64_t)n) >> 32;
+}
+
 void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
 {
     if (nEntriesThisGeneration == nEntriesPerGeneration) {
@@ -268,7 +276,8 @@ void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
     for (int n = 0; n < nHashFuncs; n++) {
         uint32_t h = RollingBloomHash(n, nTweak, vKey);
         int bit = h & 0x3F;
-        uint32_t pos = (h >> 6) % data.size();
+        /* FastMod works with the upper bits of h, so it is safe to ignore that the lower bits of h are already used for bit. */
+        uint32_t pos = FastMod(h, data.size());
         /* The lowest bit of pos is ignored, and set to zero for the first bit, and to one for the second. */
         data[pos & ~1] = (data[pos & ~1] & ~(((uint64_t)1) << bit)) | ((uint64_t)(nGeneration & 1)) << bit;
         data[pos | 1] = (data[pos | 1] & ~(((uint64_t)1) << bit)) | ((uint64_t)(nGeneration >> 1)) << bit;
@@ -286,7 +295,7 @@ bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
     for (int n = 0; n < nHashFuncs; n++) {
         uint32_t h = RollingBloomHash(n, nTweak, vKey);
         int bit = h & 0x3F;
-        uint32_t pos = (h >> 6) % data.size();
+        uint32_t pos = FastMod(h, data.size());
         /* If the relevant bit is not set in either data[pos & ~1] or data[pos | 1], the filter does not contain vKey */
         if (!(((data[pos & ~1] | data[pos | 1]) >> bit) & 1)) {
             return false;
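
Below the patch is a small standalone sketch (not part of the diff) of the multiply-shift reduction that the new FastMod relies on. The helper name FastRange32, the filter size 56, and the sweep step are illustrative choices, not taken from bloom.cpp; the point is only that the high 32 bits of the 64-bit product x * n always land in [0, n), so the result is a valid index just as (h >> 6) % data.size() was.

// Illustrative sketch of Lemire's multiply-shift range reduction (the idea behind FastMod).
// Hypothetical names and values, not from the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Same expression as FastMod in the patch: keep the high 32 bits of the 64-bit product.
// For a uniformly distributed 32-bit x this is close to uniform over [0, n),
// even though the value generally differs from x % n.
static inline uint32_t FastRange32(uint32_t x, uint32_t n) {
    return (uint32_t)(((uint64_t)x * (uint64_t)n) >> 32);
}

int main() {
    const uint32_t n = 56; // stand-in for data.size(); any n > 0 that fits in 32 bits works
    for (uint64_t x = 0; x <= UINT32_MAX; x += 0x10001) { // sparse sweep of 32-bit inputs
        uint32_t pos = FastRange32((uint32_t)x, n);
        assert(pos < n); // always a valid index, with no division instruction involved
        (void)pos;
    }
    std::printf("all reduced values land in [0, %u)\n", (unsigned)n);
    return 0;
}

Unlike x % n, the reduction computes floor(x * n / 2^32), i.e. it scales x proportionally into [0, n); it only behaves like a true modulus when x is close to uniform over the full 32-bit range, which is why the added comment stresses hash quality. That proportional scaling is also why the patch can pass all of h to FastMod even though the low 6 bits were already consumed for bit: as the in-diff comment says, FastMod is driven by the upper bits of h.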