Add sender-side protocol implementation for CMPCTBLOCK (compact block relay)

pull/8068/head
Matt Corallo 9 years ago
parent 00c40784fe
commit 9c837d5468

@@ -274,6 +274,10 @@ struct CNodeState {
     bool fPreferredDownload;
     //! Whether this peer wants invs or headers (when possible) for block announcements.
     bool fPreferHeaders;
+    //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
+    bool fPreferHeaderAndIDs;
+    //! Whether this peer will send us cmpctblocks if we request them
+    bool fProvidesHeaderAndIDs;

     CNodeState() {
         fCurrentlyConnected = false;
@@ -290,6 +294,8 @@ struct CNodeState {
         nBlocksInFlightValidHeaders = 0;
         fPreferredDownload = false;
         fPreferHeaders = false;
+        fPreferHeaderAndIDs = false;
+        fProvidesHeaderAndIDs = false;
     }
 };
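The two new flags record the outcome of compact-block negotiation per peer: fProvidesHeaderAndIDs means the peer understands CMPCTBLOCK/GETBLOCKTXN at all, while fPreferHeaderAndIDs means it asked for new blocks to be announced to it as cmpctblock. A minimal standalone sketch of how the SENDCMPCT handler added later in this commit updates them; the flag names mirror the diff, the struct and function are illustrative rather than Bitcoin Core code:

```cpp
#include <cstdint>

// Illustrative stand-in for the per-peer CNodeState fields added above.
struct CompactBlockPrefs {
    bool fPreferHeaderAndIDs = false;   // peer wants block announcements as cmpctblock
    bool fProvidesHeaderAndIDs = false; // peer can serve cmpctblock/blocktxn on request
};

// Mirrors the version-1 "sendcmpct" handling added later in this commit.
void OnSendCmpct(CompactBlockPrefs& prefs, bool fAnnounceUsingCMPCTBLOCK, uint64_t nCMPCTBLOCKVersion)
{
    if (nCMPCTBLOCKVersion == 1) {
        prefs.fProvidesHeaderAndIDs = true;
        prefs.fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
    }
}
```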
@ -4454,7 +4460,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
boost::this_thread::interruption_point(); boost::this_thread::interruption_point();
it++; it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK) if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK)
{ {
bool send = false; bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash); BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
@@ -4496,7 +4502,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                         assert(!"cannot load block from disk");
                     if (inv.type == MSG_BLOCK)
                         pfrom->PushMessage(NetMsgType::BLOCK, block);
-                    else // MSG_FILTERED_BLOCK)
+                    else if (inv.type == MSG_FILTERED_BLOCK)
                     {
                         LOCK(pfrom->cs_filter);
                         if (pfrom->pfilter)
@@ -4516,6 +4522,18 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                     // else
                         // no response
                 }
+                else if (inv.type == MSG_CMPCT_BLOCK)
+                {
+                    // If a peer is asking for old blocks, we're almost guaranteed
+                    // they won't have a useful mempool to match against a compact block,
+                    // and we don't feel like constructing the object for them, so
+                    // instead we respond with the full, non-compact block.
+                    if (mi->second->nHeight >= chainActive.Height() - 10) {
+                        CBlockHeaderAndShortTxIDs cmpctblock(block);
+                        pfrom->PushMessage(NetMsgType::CMPCTBLOCK, cmpctblock);
+                    } else
+                        pfrom->PushMessage(NetMsgType::BLOCK, block);
+                }
                 // Trigger the peer node to send a getblocks request for the next batch of inventory
                 if (inv.hash == pfrom->hashContinue)
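The height check above is a simple freshness cutoff: a compact block only saves bandwidth if the requester's mempool still holds most of the block's transactions, which is unlikely for blocks buried more than a few deep. A standalone sketch of the same rule (the constant 10 comes from the diff; the helper name is illustrative, not an existing Bitcoin Core function):

```cpp
// Serve a compact block only for blocks at most 10 below the current tip;
// older requests fall back to the full block, as in the handler above.
bool ShouldServeCompactBlock(int nRequestedHeight, int nTipHeight)
{
    return nRequestedHeight >= nTipHeight - 10;
}
```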
@@ -4839,6 +4857,18 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         State(pfrom->GetId())->fPreferHeaders = true;
     }
+    else if (strCommand == NetMsgType::SENDCMPCT)
+    {
+        bool fAnnounceUsingCMPCTBLOCK = false;
+        uint64_t nCMPCTBLOCKVersion = 1;
+        vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
+        if (nCMPCTBLOCKVersion == 1) {
+            LOCK(cs_main);
+            State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
+            State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
+        }
+    }
     else if (strCommand == NetMsgType::INV)
     {
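On the wire, sendcmpct carries the two fields the handler above deserializes: a 1-byte announce flag followed by an 8-byte little-endian version number. A standalone sketch of that encoding for illustration only; Bitcoin Core's own serializer produces this in the real code path, and the function name here is made up:

```cpp
#include <array>
#include <cstdint>

// Encode a "sendcmpct" payload: 1-byte bool, then 8-byte little-endian version.
std::array<unsigned char, 9> EncodeSendCmpct(bool fAnnounce, uint64_t nVersion)
{
    std::array<unsigned char, 9> payload{};
    payload[0] = fAnnounce ? 1 : 0;
    for (int i = 0; i < 8; ++i)
        payload[1 + i] = static_cast<unsigned char>((nVersion >> (8 * i)) & 0xff);
    return payload;
}
```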
@@ -4982,6 +5012,39 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
     }
+    else if (strCommand == NetMsgType::GETBLOCKTXN)
+    {
+        BlockTransactionsRequest req;
+        vRecv >> req;
+        BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
+        if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
+            Misbehaving(pfrom->GetId(), 100);
+            LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);
+            return true;
+        }
+        if (it->second->nHeight < chainActive.Height() - 10) {
+            LogPrint("net", "Peer %d sent us a getblocktxn for a block > 10 deep", pfrom->id);
+            return true;
+        }
+        CBlock block;
+        assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));
+        BlockTransactions resp(req);
+        for (size_t i = 0; i < req.indexes.size(); i++) {
+            if (req.indexes[i] >= block.vtx.size()) {
+                Misbehaving(pfrom->GetId(), 100);
+                LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
+                return true;
+            }
+            resp.txn[i] = block.vtx[req.indexes[i]];
+        }
+        pfrom->PushMessage(NetMsgType::BLOCKTXN, resp);
+    }
     else if (strCommand == NetMsgType::GETHEADERS)
     {
         CBlockLocator locator;
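The GETBLOCKTXN handler above serves transactions by their absolute position in block.vtx, bounds-checking each requested index. The requester builds that index list from whichever transactions it could not reconstruct locally; the wire format differentially encodes the indexes (per BIP 152), but that is handled by the serializer. A standalone sketch of assembling the absolute indexes, with illustrative names:

```cpp
#include <cstdint>
#include <vector>

// Collect the positions of transactions we could not reconstruct from our
// mempool, to be sent in a BlockTransactionsRequest.
std::vector<uint32_t> MissingTxIndexes(const std::vector<bool>& vTxAvailable)
{
    std::vector<uint32_t> indexes;
    for (uint32_t i = 0; i < vTxAvailable.size(); ++i) {
        if (!vTxAvailable[i])
            indexes.push_back(i); // absolute index into block.vtx
    }
    return indexes;
}
```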
@@ -5824,7 +5887,9 @@ bool SendMessages(CNode* pto)
         // add all to the inv queue.
         LOCK(pto->cs_inventory);
         vector<CBlock> vHeaders;
-        bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
+        bool fRevertToInv = ((!state.fPreferHeaders &&
+                             (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
+                             pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
         CBlockIndex *pBestIndex = NULL; // last header queued for delivery
         ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
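The widened fRevertToInv condition encodes a three-way choice for block announcements: cmpctblock is only worth sending for a single new block to a peer that asked for it, headers are used when the peer prefers them, and everything else (including announcing more than MAX_BLOCKS_TO_ANNOUNCE hashes) falls back to inv. A simplified standalone sketch of that decision, reusing the flag names from the diff; the enum and function are illustrative and ignore the corner cases where vHeaders cannot be built:

```cpp
#include <cstddef>

enum class AnnounceFormat { INV, HEADERS, CMPCTBLOCK };

// Simplified mirror of the SendMessages logic: cmpctblock for exactly one new
// block to a peer that requested it, headers if preferred, otherwise plain inv.
AnnounceFormat PickAnnounceFormat(bool fPreferHeaders, bool fPreferHeaderAndIDs,
                                  size_t nHashesToAnnounce, size_t nMaxBlocksToAnnounce)
{
    if (nHashesToAnnounce > nMaxBlocksToAnnounce)
        return AnnounceFormat::INV;
    if (fPreferHeaderAndIDs && nHashesToAnnounce == 1)
        return AnnounceFormat::CMPCTBLOCK;
    if (fPreferHeaders)
        return AnnounceFormat::HEADERS;
    return AnnounceFormat::INV;
}
```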
@@ -5876,6 +5941,33 @@ bool SendMessages(CNode* pto)
                 }
             }
         }
+        if (!fRevertToInv && !vHeaders.empty()) {
+            if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
+                // We only send up to 1 block as header-and-ids, as anything more
+                // probably means we're doing an initial-ish sync or they're slow.
+                LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
+                        vHeaders.front().GetHash().ToString(), pto->id);
+                //TODO: Shouldn't need to reload block from disk, but requires refactor
+                CBlock block;
+                assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
+                CBlockHeaderAndShortTxIDs cmpctblock(block);
+                pto->PushMessage(NetMsgType::CMPCTBLOCK, cmpctblock);
+                state.pindexBestHeaderSent = pBestIndex;
+            } else if (state.fPreferHeaders) {
+                if (vHeaders.size() > 1) {
+                    LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
+                            vHeaders.size(),
+                            vHeaders.front().GetHash().ToString(),
+                            vHeaders.back().GetHash().ToString(), pto->id);
+                } else {
+                    LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
+                            vHeaders.front().GetHash().ToString(), pto->id);
+                }
+                pto->PushMessage(NetMsgType::HEADERS, vHeaders);
+                state.pindexBestHeaderSent = pBestIndex;
+            } else
+                fRevertToInv = true;
+        }
         if (fRevertToInv) {
             // If falling back to using an inv, just try to inv the tip.
             // The last entry in vBlockHashesToAnnounce was our tip at some point
@@ -5901,18 +5993,6 @@ bool SendMessages(CNode* pto)
                         pto->id, hashToAnnounce.ToString());
                 }
             }
-        } else if (!vHeaders.empty()) {
-            if (vHeaders.size() > 1) {
-                LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
-                        vHeaders.size(),
-                        vHeaders.front().GetHash().ToString(),
-                        vHeaders.back().GetHash().ToString(), pto->id);
-            } else {
-                LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
-                        vHeaders.front().GetHash().ToString(), pto->id);
-            }
-            pto->PushMessage(NetMsgType::HEADERS, vHeaders);
-            state.pindexBestHeaderSent = pBestIndex;
         }
         pto->vBlockHashesToAnnounce.clear();
     }
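For scale, the bandwidth case for announcing a recent block as CMPCTBLOCK rather than relaying it whole: per BIP 152 a compact block is roughly the 80-byte header, an 8-byte nonce, about 6 bytes per short transaction ID, plus prefilled transactions (at least the coinbase). A rough standalone calculation under assumed numbers; the transaction count, coinbase size, and full-block size below are assumptions, not measurements:

```cpp
#include <cstdio>

int main()
{
    const unsigned long nTx = 2000;                // assumed transactions in the block
    const unsigned long nPrefilledBytes = 300;     // assumed size of the prefilled coinbase
    const unsigned long nFullBlockBytes = 1000000; // assumed ~1 MB full block

    // header (80) + nonce (8) + 6-byte short ID per transaction + prefilled txs,
    // ignoring compact-size length prefixes.
    const unsigned long nCompactBytes = 80 + 8 + 6 * nTx + nPrefilledBytes;

    std::printf("compact block: ~%lu bytes, full block: ~%lu bytes\n",
                nCompactBytes, nFullBlockBytes);
    return 0;
}
```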
