Split up and optimize transaction and block inv queues

Branch: pull/262/head
Author: Pieter Wuille, 9 years ago
Parent: f2d3ba7386
Commit: dc13dcd2be

@@ -5569,18 +5569,11 @@ public:
         mp = mempool;
     }
 
-    bool operator()(const CInv &a, const CInv &b)
+    bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
     {
-        if (a.type != MSG_TX && b.type != MSG_TX) {
-            return false;
-        } else {
-            if (a.type != MSG_TX) {
-                return true;
-            } else if (b.type != MSG_TX) {
-                return false;
-            }
-            return mp->CompareDepthAndScore(a.hash, b.hash);
-        }
+        /* As std::make_heap produces a max-heap, we want the entries with the
+         * fewest ancestors/highest fee to sort later. */
+        return mp->CompareDepthAndScore(*b, *a);
     }
 };
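The inverted comparison above is the whole trick: CompareDepthAndScore(a, b) is, as the comment implies, "a should be announced before b", and std::make_heap builds a max-heap, so flipping the arguments makes the best candidates (fewest ancestors, highest fee) surface at the top of the heap. A minimal standalone sketch of that behaviour, with a plain integer ordering standing in for the mempool comparison:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Stand-in for CompareDepthAndScore: "a should be announced before b".
    static bool AnnounceBefore(int a, int b) { return a < b; }

    int main()
    {
        std::vector<int> v{4, 1, 3, 2};
        // Flipping the arguments turns "announce first" into "sort later",
        // which is what a std::make_heap max-heap needs for the best element
        // to end up on top and pop first.
        auto cmp = [](int a, int b) { return AnnounceBefore(b, a); };
        std::make_heap(v.begin(), v.end(), cmp);
        while (!v.empty()) {
            std::pop_heap(v.begin(), v.end(), cmp);
            std::printf("%d ", v.back()); // prints 1 2 3 4: best (lowest) first
            v.pop_back();
        }
    }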
@@ -5808,38 +5801,59 @@ bool SendMessages(CNode* pto)
         // Message: inventory
         //
         vector<CInv> vInv;
-        vector<CInv> vInvWait;
         {
+            LOCK(pto->cs_inventory);
+            vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
+            // Add blocks
+            BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
+                vInv.push_back(CInv(MSG_BLOCK, hash));
+                if (vInv.size() == MAX_INV_SZ) {
+                    pto->PushMessage(NetMsgType::INV, vInv);
+                    vInv.clear();
+                }
+            }
+            pto->vInventoryBlockToSend.clear();
             // Determine transactions to relay
             bool fSendTrickle = pto->fWhitelisted;
             if (pto->nNextInvSend < nNow) {
                 fSendTrickle = true;
-                // Use half the delay for outbound peers, as their is less privacy concern for them.
+                // Use half the delay for outbound peers, as there is less privacy concern for them.
                 pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
             }
-            LOCK(pto->cs_inventory);
-            if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
+            if (fSendTrickle) {
+                // Produce a vector with all candidates for sending
+                vector<std::set<uint256>::iterator> vInvTx;
+                vInvTx.reserve(pto->setInventoryTxToSend.size());
+                for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
+                    vInvTx.push_back(it);
+                }
                 // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
+                // A heap is used so that not all items need sorting if only a few are being sent.
                 CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
-                std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
-            }
-            vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
-            vInvWait.reserve(pto->vInventoryToSend.size());
-            BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
-            {
-                if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
-                    continue;
+                std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
                 // No reason to drain out at many times the network's capacity,
                 // especially since we have many peers and some will draw much shorter delays.
-                if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
-                    vInvWait.push_back(inv);
-                    continue;
-                }
+                unsigned int nRelayedTransactions = 0;
+                while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
+                    // Fetch the top element from the heap
+                    std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
+                    std::set<uint256>::iterator it = vInvTx.back();
+                    vInvTx.pop_back();
+                    uint256 hash = *it;
+                    // Remove it from the to-be-sent set
+                    pto->setInventoryTxToSend.erase(it);
+                    // Check if not in the filter already
+                    if (pto->filterInventoryKnown.contains(hash)) {
+                        continue;
+                    }
+                    // Send
+                    vInv.push_back(CInv(MSG_TX, hash));
+                    nRelayedTransactions++;
+                    pto->filterInventoryKnown.insert(hash);
+                }
-                pto->filterInventoryKnown.insert(inv.hash);
-                vInv.push_back(inv);
             }
-            pto->vInventoryToSend = vInvWait;
         }
         if (!vInv.empty())
             pto->PushMessage(NetMsgType::INV, vInv);
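Compared with the old std::stable_sort over the whole vInventoryToSend vector, the heap performs a partial selection: building it is linear in the number of pending txids and each of the at most INVENTORY_BROADCAST_MAX pops costs O(log n), so a large backlog no longer pays a full O(n log n) sort on every trickle. A self-contained sketch of the same pattern, a heap of std::set iterators drained best-first while entries are erased from the set (the integer ids and score map are placeholders, not anything taken from the mempool):

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    int main()
    {
        // Pending announcements (stand-in for setInventoryTxToSend) and a toy
        // score per id (stand-in for the mempool's depth/fee ordering).
        std::set<int> pending{10, 20, 30, 40, 50};
        std::map<int, int> score{{10, 3}, {20, 9}, {30, 1}, {40, 7}, {50, 5}};

        // Heap of iterators into the set; std::set iterators stay valid while
        // other elements are erased, which lets us erase as we pop.
        std::vector<std::set<int>::iterator> heap;
        for (auto it = pending.begin(); it != pending.end(); ++it) heap.push_back(it);

        // Max-heap on the score, so the best entry pops first.
        auto cmp = [&](std::set<int>::iterator a, std::set<int>::iterator b) {
            return score[*a] < score[*b];
        };
        std::make_heap(heap.begin(), heap.end(), cmp);

        const size_t kMaxPerRound = 3; // stand-in for INVENTORY_BROADCAST_MAX
        size_t relayed = 0;
        while (!heap.empty() && relayed < kMaxPerRound) {
            std::pop_heap(heap.begin(), heap.end(), cmp);
            std::set<int>::iterator it = heap.back();
            heap.pop_back();
            std::printf("announce id=%d score=%d\n", *it, score[*it]);
            pending.erase(it); // drop from the to-be-sent set
            ++relayed;
        }
        std::printf("left for next round: %zu\n", pending.size()); // 2
    }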

@@ -397,7 +397,13 @@ public:
     // inventory based relay
     CRollingBloomFilter filterInventoryKnown;
-    std::vector<CInv> vInventoryToSend;
+    // Set of transaction ids we still have to announce.
+    // They are sorted by the mempool before relay, so the order is not important.
+    std::set<uint256> setInventoryTxToSend;
+    // List of block ids we still have to announce.
+    // There is no final sorting before sending, as they are always sent immediately
+    // and in the order requested.
+    std::vector<uint256> vInventoryBlockToSend;
     CCriticalSection cs_inventory;
     std::set<uint256> setAskFor;
     std::multimap<int64_t, CInv> mapAskFor;
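The relay loop in SendMessages leans on two standard guarantees of std::set that make this split workable: insert() deduplicates repeated announcements of the same txid, and erasing one element does not invalidate iterators to the others, so the vector of iterators built for the heap stays valid while entries are erased as they are relayed. A small demonstration:

    #include <cassert>
    #include <set>

    int main()
    {
        std::set<int> txToSend;
        txToSend.insert(7);
        txToSend.insert(7);                 // duplicate announcement: no effect
        assert(txToSend.size() == 1);

        txToSend.insert(1);
        txToSend.insert(9);
        std::set<int>::iterator keep = txToSend.find(9);
        txToSend.erase(txToSend.find(1));   // erasing one element...
        assert(*keep == 9);                 // ...leaves other iterators valid
        assert(txToSend.count(1) == 0);
    }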
@@ -517,11 +523,13 @@ public:
     void PushInventory(const CInv& inv)
     {
-        {
-            LOCK(cs_inventory);
-            if (inv.type == MSG_TX && filterInventoryKnown.contains(inv.hash))
-                return;
-            vInventoryToSend.push_back(inv);
+        LOCK(cs_inventory);
+        if (inv.type == MSG_TX) {
+            if (!filterInventoryKnown.contains(inv.hash)) {
+                setInventoryTxToSend.insert(inv.hash);
+            }
+        } else if (inv.type == MSG_BLOCK) {
+            vInventoryBlockToSend.push_back(inv.hash);
         }
     }
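For readers outside the Bitcoin Core tree: LOCK(cs_inventory) is the project's RAII critical-section macro and filterInventoryKnown a rolling bloom filter, but the routing itself is simple. A generic analogue with std::mutex and a plain set standing in for those pieces (the names below are illustrative only, not Bitcoin Core API):

    #include <cassert>
    #include <mutex>
    #include <set>
    #include <string>
    #include <vector>

    struct RelayQueues {
        std::mutex m;                           // analogue of cs_inventory
        std::set<std::string> knownTx;          // analogue of filterInventoryKnown
        std::set<std::string> txToSend;         // analogue of setInventoryTxToSend
        std::vector<std::string> blocksToSend;  // analogue of vInventoryBlockToSend

        void PushTx(const std::string& txid)
        {
            std::lock_guard<std::mutex> lock(m);
            if (!knownTx.count(txid)) txToSend.insert(txid); // skip what the peer already knows
        }
        void PushBlock(const std::string& hash)
        {
            std::lock_guard<std::mutex> lock(m);
            blocksToSend.push_back(hash); // blocks keep announcement order
        }
    };

    int main()
    {
        RelayQueues q;
        q.knownTx.insert("txA");
        q.PushTx("txA"); // already known to the peer: not queued
        q.PushTx("txB");
        q.PushBlock("block1");
        q.PushBlock("block2");
        assert(q.txToSend.size() == 1);
        assert(q.blocksToSend.size() == 2 && q.blocksToSend[0] == "block1");
    }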
