Merge #21015: Make all of net_processing (and some of net) use std::chrono types

0eaea66e8b Make tx relay data structure use std::chrono types (Pieter Wuille)
55e82881a1 Make all Poisson delays use std::chrono types (Pieter Wuille)
c733ac4d8a Convert block/header sync timeouts to std::chrono types (Pieter Wuille)
4d98b401fb Change all ping times to std::chrono types (Pieter Wuille)

Pull request description:

  (Picking up #20044. Rebased against master.)

  This changes various places that used plain integers to represent timestamps and durations over to `std::chrono` duration types with type-safe conversions, getting rid of assorted `.count()` calls, explicit constructors, and hand-written conversion factors.
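
  As a minimal sketch of the pattern (the function names and values below are made up for illustration, not taken from the diff), the same kind of computation goes from hand-converted integer microseconds to type-checked durations:

  ```cpp
  #include <chrono>
  #include <cstdint>

  using namespace std::chrono_literals;

  // Before: the unit lives only in names, comments and magic factors.
  int64_t NextSendBefore(int64_t now_usec, int avg_interval_seconds)
  {
      return now_usec + avg_interval_seconds * int64_t{1000000}; // seconds -> microseconds by hand
  }

  // After: the compiler tracks the unit and converts losslessly.
  std::chrono::microseconds NextSendAfter(std::chrono::microseconds now, std::chrono::seconds avg_interval)
  {
      return now + avg_interval; // implicit, lossless seconds -> microseconds
  }

  // 5 s plus a 2 min interval is 125'000'000 us, with no .count() or * 1000000 in sight.
  static_assert(std::chrono::microseconds{5s} + 2min == 125'000'000us,
                "durations of different units compare in their common type");
  ```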

ACKs for top commit:
  jnewbery:
    utACK 0eaea66e8b
  vasild:
    ACK 0eaea66e8b
  MarcoFalke:
    re-ACK 0eaea66e8b, only changes: minor rename, using C++11 member initializer, using 2min chrono literal, rebase 🤚
  ajtowns:
    utACK 0eaea66e8b

Tree-SHA512: 2dbd8d53bf82e98f9b4611e61dc14c448e8957d1a02575b837fadfd59f80e98614d0ccf890fc351f960ade76a6fb8051b282e252e81675a8ee753dba8b1d7f57
commit 33921379b6

@ -603,8 +603,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
stats.minFeeFilter = 0;
}
stats.m_ping_usec = m_last_ping_time;
stats.m_min_ping_usec = m_min_ping_time;
X(m_last_ping_time);
X(m_min_ping_time);
// Leave string empty if addrLocal invalid (not filled in yet)
CService addrLocalUnlocked = GetAddrLocal();
@ -1761,12 +1761,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
}
// Initiate network connections
auto start = GetTime<std::chrono::seconds>();
auto start = GetTime<std::chrono::microseconds>();
// Minimum time before next feeler connection (in microseconds).
int64_t nNextFeeler = PoissonNextSend(count_microseconds(start), FEELER_INTERVAL);
int64_t nNextExtraBlockRelay = PoissonNextSend(count_microseconds(start), EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
auto next_feeler = PoissonNextSend(start, FEELER_INTERVAL);
auto next_extra_block_relay = PoissonNextSend(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
@ -1849,7 +1848,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
}
ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY;
int64_t nTime = GetTimeMicros();
auto now = GetTime<std::chrono::microseconds>();
bool anchor = false;
bool fFeeler = false;
@ -1861,7 +1860,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
// try opening an additional OUTBOUND_FULL_RELAY connection. If none of
// these conditions are met, check to see if it's time to try an extra
// block-relay-only peer (to confirm our tip is current, see below) or the nNextFeeler
// block-relay-only peer (to confirm our tip is current, see below) or the next_feeler
// timer to decide if we should open a FEELER.
if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) {
@ -1873,7 +1872,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
conn_type = ConnectionType::BLOCK_RELAY;
} else if (GetTryNewOutboundPeer()) {
// OUTBOUND_FULL_RELAY
} else if (nTime > nNextExtraBlockRelay && m_start_extra_block_relay_peers) {
} else if (now > next_extra_block_relay && m_start_extra_block_relay_peers) {
// Periodically connect to a peer (using regular outbound selection
// methodology from addrman) and stay connected long enough to sync
// headers, but not much else.
@ -1895,10 +1894,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// Because we can promote these connections to block-relay-only
// connections, they do not get their own ConnectionType enum
// (similar to how we deal with extra outbound peers).
nNextExtraBlockRelay = PoissonNextSend(nTime, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
next_extra_block_relay = PoissonNextSend(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
conn_type = ConnectionType::BLOCK_RELAY;
} else if (nTime > nNextFeeler) {
nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL);
} else if (now > next_feeler) {
next_feeler = PoissonNextSend(now, FEELER_INTERVAL);
conn_type = ConnectionType::FEELER;
fFeeler = true;
} else {
@ -2983,20 +2982,21 @@ bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
return found != nullptr && NodeFullyConnected(found) && func(found);
}
int64_t CConnman::PoissonNextSendInbound(int64_t now, int average_interval_seconds)
std::chrono::microseconds CConnman::PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval)
{
if (m_next_send_inv_to_incoming < now) {
if (m_next_send_inv_to_incoming.load() < now) {
// If this function were called from multiple threads simultaneously
// it would be possible that both update the next send variable, and return a different result to their caller.
// This is not possible in practice as only the net processing thread invokes this function.
m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval_seconds);
m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval);
}
return m_next_send_inv_to_incoming;
}
int64_t PoissonNextSend(int64_t now, int average_interval_seconds)
std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval)
{
return now + (int64_t)(log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */) * average_interval_seconds * -1000000.0 + 0.5);
double unscaled = -log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */);
return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
}
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
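
The net.cpp hunks above replace the hand-rolled microsecond math in `PoissonNextSend` with duration arithmetic. Below is a self-contained sketch of the same exponential-delay computation; `std::mt19937_64` and the function name are stand-ins for illustration (the real code draws its randomness from `GetRand(1ULL << 48)`):

```cpp
#include <chrono>
#include <cmath>
#include <cstdint>
#include <random>

using namespace std::chrono_literals;

// Exponentially distributed delay with the given average, mirroring the new
// PoissonNextSend above. std::mt19937_64 stands in for GetRand(1ULL << 48).
std::chrono::microseconds PoissonNextSendSketch(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval)
{
    static std::mt19937_64 rng{std::random_device{}()};
    const uint64_t r = rng() >> 16; // uniform in [0, 2^48)
    // -log1p(-r / 2^48) == -log(1 - r/2^48) turns a uniform sample into an Exp(1) sample.
    const double unscaled = -std::log1p(r * -0.0000000000000035527136788 /* -1/2^48 */);
    // Scaling a duration by a double yields a floating-point duration; adding 0.5us and
    // casting back truncates to the nearest whole microsecond.
    return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
}

// e.g. scheduling the next feeler attempt, on average 2 minutes from now:
//   auto next_feeler = PoissonNextSendSketch(current_time, 2min);
```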

@ -49,10 +49,10 @@ static const bool DEFAULT_WHITELISTFORCERELAY = false;
/** Time after which to disconnect, after waiting for a ping response (or inactivity). */
static const int TIMEOUT_INTERVAL = 20 * 60;
/** Run the feeler connection loop once every 2 minutes or 120 seconds. **/
static const int FEELER_INTERVAL = 120;
/** Run the feeler connection loop once every 2 minutes. **/
static constexpr auto FEELER_INTERVAL = 2min;
/** Run the extra block-relay-only connection loop once every 5 minutes. **/
static const int EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL = 300;
static constexpr auto EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL = 5min;
/** The maximum number of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_ADDR_TO_SEND = 1000;
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
@ -261,8 +261,8 @@ public:
uint64_t nRecvBytes;
mapMsgCmdSize mapRecvBytesPerMsgCmd;
NetPermissionFlags m_permissionFlags;
int64_t m_ping_usec;
int64_t m_min_ping_usec;
std::chrono::microseconds m_last_ping_time;
std::chrono::microseconds m_min_ping_time;
CAmount minFeeFilter;
// Our address, as reported by the peer
std::string addrLocal;
@ -573,7 +573,7 @@ public:
/** Minimum fee rate with which to filter inv's to this node */
std::atomic<CAmount> minFeeFilter{0};
CAmount lastSentFeeFilter{0};
int64_t nextSendTimeFeeFilter{0};
std::chrono::microseconds m_next_send_feefilter{0};
};
// m_tx_relay == nullptr if we're not relaying transactions with this peer
@ -593,11 +593,11 @@ public:
std::atomic<int64_t> nLastTXTime{0};
/** Last measured round-trip time. Used only for RPC/GUI stats/debugging.*/
std::atomic<int64_t> m_last_ping_time{0};
std::atomic<std::chrono::microseconds> m_last_ping_time{0us};
/** Lowest measured round-trip time. Used as an inbound peer eviction
* criterium in CConnman::AttemptToEvictConnection. */
std::atomic<int64_t> m_min_ping_time{std::numeric_limits<int64_t>::max()};
std::atomic<std::chrono::microseconds> m_min_ping_time{std::chrono::microseconds::max()};
CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion);
~CNode();
@ -719,8 +719,8 @@ public:
/** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */
void PongReceived(std::chrono::microseconds ping_time) {
m_last_ping_time = count_microseconds(ping_time);
m_min_ping_time = std::min(m_min_ping_time.load(), count_microseconds(ping_time));
m_last_ping_time = ping_time;
m_min_ping_time = std::min(m_min_ping_time.load(), ping_time);
}
private:
@ -1021,7 +1021,7 @@ public:
Works assuming that a single interval is used.
Variable intervals will result in privacy decrease.
*/
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds);
std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval);
void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); }
@ -1256,7 +1256,7 @@ private:
*/
std::atomic_bool m_start_extra_block_relay_peers{false};
std::atomic<int64_t> m_next_send_inv_to_incoming{0};
std::atomic<std::chrono::microseconds> m_next_send_inv_to_incoming{0us};
/**
* A vector of -bind=<address>:<port>=onion arguments each of which is
@ -1269,13 +1269,7 @@ private:
};
/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */
int64_t PoissonNextSend(int64_t now, int average_interval_seconds);
/** Wrapper to return mockable type */
inline std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval)
{
return std::chrono::microseconds{PoissonNextSend(now.count(), average_interval.count())};
}
std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval);
/** Dump binary message to file, with timestamp */
void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming);
@ -1284,7 +1278,7 @@ struct NodeEvictionCandidate
{
NodeId id;
int64_t nTimeConnected;
int64_t m_min_ping_time;
std::chrono::microseconds m_min_ping_time;
int64_t nLastBlockTime;
int64_t nLastTXTime;
bool fRelevantServices;
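
The net.h hunks above turn several `std::atomic<int64_t>` timestamps into `std::atomic<std::chrono::microseconds>`. That is valid because durations are trivially copyable, which is what `std::atomic` requires (lock-freedom is not guaranteed by the standard, though 8-byte durations are lock-free on common 64-bit platforms). A minimal sketch of the ping-time pattern, with a hypothetical struct name:

```cpp
#include <algorithm>
#include <atomic>
#include <chrono>
#include <type_traits>

using namespace std::chrono_literals;

static_assert(std::is_trivially_copyable_v<std::chrono::microseconds>,
              "std::atomic<T> requires a trivially copyable T");

// Mirrors the new CNode members: last ping starts at 0, minimum ping at "infinity".
struct PingTimesSketch {
    std::atomic<std::chrono::microseconds> m_last_ping_time{0us};
    std::atomic<std::chrono::microseconds> m_min_ping_time{std::chrono::microseconds::max()};

    void PongReceived(std::chrono::microseconds ping_time)
    {
        m_last_ping_time = ping_time;
        // .load() takes a plain snapshot so std::min compares two durations.
        m_min_ping_time = std::min(m_min_ping_time.load(), ping_time);
    }
};
```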

@ -37,13 +37,13 @@
#include <typeinfo>
/** How long to cache transactions in mapRelay for normal relay */
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME = std::chrono::minutes{15};
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */
static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2};
/** Headers download timeout expressed in microseconds
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/** Headers download timeout.
* Timeout = base + per_header * (expected number of headers) */
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
@ -90,8 +90,8 @@ static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seco
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Time during which a peer must stall block download progress before being disconnected. */
static constexpr auto BLOCK_STALLING_TIMEOUT = 2s;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
@ -105,10 +105,10 @@ static const int MAX_BLOCKTXN_DEPTH = 10;
* degree of disordering of blocks on disk (which make reindexing and pruning harder). We'll probably
* want to make this a per-peer adaptive value at some point. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base, expressed in millionths of the block interval (i.e. 10 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
/** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before DoS score */
@ -116,17 +116,21 @@ static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24h;
/** Average delay between peer address broadcasts */
static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
/** Average delay between trickled inventory transmissions in seconds.
* Blocks and peers with noban permission bypass this, outbound peers get half this delay. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL = 30s;
/** Average delay between trickled inventory transmissions for inbound peers.
* Blocks and peers with noban permission bypass this. */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s;
/** Average delay between trickled inventory transmissions for outbound peers.
* Use a smaller delay as there is less privacy concern for them.
* Blocks and peers with noban permission bypass this. */
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL = 2s;
/** Maximum rate of inventory items to send per second.
* Limits the impact of low-fee transaction floods. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX = INVENTORY_BROADCAST_PER_SECOND * INVENTORY_BROADCAST_INTERVAL;
static constexpr unsigned int INVENTORY_BROADCAST_MAX = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically
@ -135,9 +139,9 @@ static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
* peers, and random variations in the broadcast mechanism. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low");
/** Average delay between feefilter broadcasts in seconds. */
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL = 10min;
/** Maximum feefilter broadcast delay after significant change. */
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY = 5min;
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
@ -441,7 +445,7 @@ private:
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/** Expiration-time ordered list of (expire time, relay map entry) pairs. */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);
std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>> g_relay_expiration GUARDED_BY(cs_main);
/**
* When a peer sends us a valid block, instruct it to announce blocks to us
@ -499,12 +503,12 @@ struct CNodeState {
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted;
//! When to potentially disconnect peer for stalling headers download
int64_t nHeadersSyncTimeout;
std::chrono::microseconds m_headers_sync_timeout{0us};
//! Since when we're stalling block download progress (in microseconds), or 0.
int64_t nStallingSince;
std::chrono::microseconds m_stalling_since{0us};
std::list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
int64_t nDownloadingSince;
std::chrono::microseconds m_downloading_since{0us};
int nBlocksInFlight;
int nBlocksInFlightValidHeaders;
//! Whether we consider this a preferred download peer.
@ -587,9 +591,6 @@ struct CNodeState {
pindexBestHeaderSent = nullptr;
nUnconnectingHeaders = 0;
fSyncStarted = false;
nHeadersSyncTimeout = 0;
nStallingSince = 0;
nDownloadingSince = 0;
nBlocksInFlight = 0;
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
@ -638,11 +639,11 @@ bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash)
}
if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
// First block on the queue was received, update the start download time for the next one
state->nDownloadingSince = std::max(state->nDownloadingSince, count_microseconds(GetTime<std::chrono::microseconds>()));
state->m_downloading_since = std::max(state->m_downloading_since, GetTime<std::chrono::microseconds>());
}
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
state->nStallingSince = 0;
state->m_stalling_since = 0us;
mapBlocksInFlight.erase(itInFlight);
return true;
}
@ -672,7 +673,7 @@ bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, co
state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
state->nDownloadingSince = GetTime<std::chrono::microseconds>().count();
state->m_downloading_since = GetTime<std::chrono::microseconds>();
}
if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
nPeersWithValidatedDownloads++;
@ -1078,7 +1079,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
}
stats.m_ping_wait_usec = count_microseconds(ping_wait);
stats.m_ping_wait = ping_wait;
return true;
}
@ -4279,7 +4280,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
state.nHeadersSyncTimeout = count_microseconds(current_time) + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
(
// Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
// to maintain precision
std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
(GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / consensusParams.nPowTargetSpacing
);
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
/* If possible, start at the block preceding the currently
@ -4460,10 +4467,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(count_microseconds(current_time), INVENTORY_BROADCAST_INTERVAL)};
pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else {
// Use half the delay for outbound peers, as there is less privacy concern for them.
pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
}
}
@ -4551,20 +4557,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
nRelayedTransactions++;
{
// Expire old relay messages
while (!vRelayExpiration.empty() && vRelayExpiration.front().first < count_microseconds(current_time))
while (!g_relay_expiration.empty() && g_relay_expiration.front().first < current_time)
{
mapRelay.erase(vRelayExpiration.front().second);
vRelayExpiration.pop_front();
mapRelay.erase(g_relay_expiration.front().second);
g_relay_expiration.pop_front();
}
auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
if (ret.second) {
vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret.first);
g_relay_expiration.emplace_back(current_time + RELAY_TX_CACHE_TIME, ret.first);
}
// Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
if (ret2.second) {
vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret2.first);
g_relay_expiration.emplace_back(current_time + RELAY_TX_CACHE_TIME, ret2.first);
}
}
if (vInv.size() == MAX_INV_SZ) {
@ -4589,7 +4595,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Detect whether we're stalling
current_time = GetTime<std::chrono::microseconds>();
if (state.nStallingSince && state.nStallingSince < count_microseconds(current_time) - 1000000 * BLOCK_STALLING_TIMEOUT) {
if (state.m_stalling_since.count() && state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
@ -4597,7 +4603,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
pto->fDisconnect = true;
return true;
}
// In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
// In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
// (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
// We compensate for other peers to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
@ -4605,17 +4611,17 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
if (count_microseconds(current_time) > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
pto->fDisconnect = true;
return true;
}
}
// Check for headers sync timeouts
if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
// Detect whether this is a stalling initial-headers-sync peer
if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
if (count_microseconds(current_time) > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
// Disconnect a peer (without the noban permission) if it is our only sync peer,
// and we have others we could be using instead.
// Note: If all our peers are inbound, then we won't
@ -4634,13 +4640,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// this peer (eventually).
state.fSyncStarted = false;
nSyncStarted--;
state.nHeadersSyncTimeout = 0;
state.m_headers_sync_timeout = 0us;
}
}
} else {
// After we've caught up once, reset the timeout so we can't trigger
// disconnect later.
state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
state.m_headers_sync_timeout = std::chrono::microseconds::max();
}
}
@ -4664,8 +4670,8 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
pindex->nHeight, pto->GetId());
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
State(staller)->nStallingSince = count_microseconds(current_time);
if (State(staller)->m_stalling_since == 0us) {
State(staller)->m_stalling_since = current_time;
LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
}
}
@ -4718,10 +4724,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
// Send the current filter if we sent MAX_FILTER previously
// and made it out of IBD.
pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) - 1;
pto->m_tx_relay->m_next_send_feefilter = 0us;
}
}
if (count_microseconds(current_time) > pto->m_tx_relay->nextSendTimeFeeFilter) {
if (current_time > pto->m_tx_relay->m_next_send_feefilter) {
CAmount filterToSend = g_filter_rounder.round(currentFilter);
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
@ -4729,13 +4735,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->m_tx_relay->lastSentFeeFilter = filterToSend;
}
pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(count_microseconds(current_time), AVG_FEEFILTER_BROADCAST_INTERVAL);
pto->m_tx_relay->m_next_send_feefilter = PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
else if (count_microseconds(current_time) + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < pto->m_tx_relay->m_next_send_feefilter &&
(currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
pto->m_tx_relay->m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
}
}
} // release cs_main
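
The headers-sync timeout above now mixes `15min`, `1ms`, and a seconds-based block spacing; `std::chrono` promotes everything to a common unit automatically. A rough worked example with hypothetical numbers (tip one day behind, 10-minute target spacing), not values taken from a running node:

```cpp
#include <chrono>
#include <cstdint>

using namespace std::chrono_literals;

constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;

constexpr int64_t seconds_behind = 24 * 60 * 60; // tip is one day old
constexpr int64_t pow_target_spacing = 600;      // expected seconds per block

// Same shape as the expression in the diff: widen the per-header term to
// microseconds before scaling, so the integer division keeps sub-millisecond precision.
constexpr auto extra = std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER}
                       * seconds_behind / pow_target_spacing;

static_assert(extra == 144ms, "one day behind ~ 144 expected headers, 1 ms each");
static_assert(HEADERS_DOWNLOAD_TIMEOUT_BASE + extra == 15min + 144ms, "total timeout");
```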

@ -29,7 +29,7 @@ struct CNodeStateStats {
int nSyncHeight = -1;
int nCommonHeight = -1;
int m_starting_height = -1;
int64_t m_ping_wait_usec;
std::chrono::microseconds m_ping_wait;
std::vector<int> vHeightInFlight;
};

@ -59,6 +59,8 @@
#include <QUrlQuery>
#include <QtGlobal>
#include <chrono>
#if defined(Q_OS_MAC)
#include <QProcess>
@ -706,9 +708,11 @@ QString formatServicesStr(quint64 mask)
return QObject::tr("None");
}
QString formatPingTime(int64_t ping_usec)
QString formatPingTime(std::chrono::microseconds ping_time)
{
return (ping_usec == std::numeric_limits<int64_t>::max() || ping_usec == 0) ? QObject::tr("N/A") : QString(QObject::tr("%1 ms")).arg(QString::number((int)(ping_usec / 1000), 10));
return (ping_time == std::chrono::microseconds::max() || ping_time == 0us) ?
QObject::tr("N/A") :
QString(QObject::tr("%1 ms")).arg(QString::number((int)(count_microseconds(ping_time) / 1000), 10));
}
QString formatTimeOffset(int64_t nTimeOffset)

@ -20,6 +20,8 @@
#include <QString>
#include <QTableView>
#include <chrono>
class QValidatedLineEdit;
class SendCoinsRecipient;
@ -202,8 +204,8 @@ namespace GUIUtil
/** Format CNodeStats.nServices bitmask into a user-readable string */
QString formatServicesStr(quint64 mask);
/** Format a CNodeStats.m_ping_usec into a user-readable string or display N/A, if 0 */
QString formatPingTime(int64_t ping_usec);
/** Format a CNodeStats.m_last_ping_time into a user-readable string or display N/A, if 0 */
QString formatPingTime(std::chrono::microseconds ping_time);
/** Format a CNodeCombinedStats.nTimeOffset into a user-readable string */
QString formatTimeOffset(int64_t nTimeOffset);

@ -34,7 +34,7 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine
case PeerTableModel::Network:
return pLeft->m_network < pRight->m_network;
case PeerTableModel::Ping:
return pLeft->m_min_ping_usec < pRight->m_min_ping_usec;
return pLeft->m_min_ping_time < pRight->m_min_ping_time;
case PeerTableModel::Sent:
return pLeft->nSendBytes < pRight->nSendBytes;
case PeerTableModel::Received:
@ -170,7 +170,7 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const
case Network:
return GUIUtil::NetworkToQString(rec->nodeStats.m_network);
case Ping:
return GUIUtil::formatPingTime(rec->nodeStats.m_min_ping_usec);
return GUIUtil::formatPingTime(rec->nodeStats.m_min_ping_time);
case Sent:
return GUIUtil::formatBytes(rec->nodeStats.nSendBytes);
case Received:

@ -1128,8 +1128,8 @@ void RPCConsole::updateDetailWidget()
ui->peerLastRecv->setText(TimeDurationField(time_now, stats->nodeStats.nLastRecv));
ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes));
ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes));
ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_usec));
ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.m_min_ping_usec));
ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_last_ping_time));
ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.m_min_ping_time));
ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset));
ui->peerVersion->setText(QString::number(stats->nodeStats.nVersion));
ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
@ -1162,7 +1162,7 @@ void RPCConsole::updateDetailWidget()
ui->peerCommonHeight->setText(tr("Unknown"));
ui->peerHeight->setText(QString::number(stats->nodeStateStats.m_starting_height));
ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait_usec));
ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait));
}
ui->peersTabRightPanel->show();

@ -202,14 +202,14 @@ static RPCHelpMan getpeerinfo()
obj.pushKV("bytesrecv", stats.nRecvBytes);
obj.pushKV("conntime", stats.nTimeConnected);
obj.pushKV("timeoffset", stats.nTimeOffset);
if (stats.m_ping_usec > 0) {
obj.pushKV("pingtime", ((double)stats.m_ping_usec) / 1e6);
if (stats.m_last_ping_time > 0us) {
obj.pushKV("pingtime", CountSecondsDouble(stats.m_last_ping_time));
}
if (stats.m_min_ping_usec < std::numeric_limits<int64_t>::max()) {
obj.pushKV("minping", ((double)stats.m_min_ping_usec) / 1e6);
if (stats.m_min_ping_time < std::chrono::microseconds::max()) {
obj.pushKV("minping", CountSecondsDouble(stats.m_min_ping_time));
}
if (fStateStats && statestats.m_ping_wait_usec > 0) {
obj.pushKV("pingwait", ((double)statestats.m_ping_wait_usec) / 1e6);
if (fStateStats && statestats.m_ping_wait > 0s) {
obj.pushKV("pingwait", CountSecondsDouble(statestats.m_ping_wait));
}
obj.pushKV("version", stats.nVersion);
// Use the sanitized form of subver here, to avoid tricksy remote peers from

@ -104,7 +104,9 @@ FUZZ_TARGET_INIT(connman, initialize_connman)
},
[&] {
// Limit now to int32_t to avoid signed integer overflow
(void)connman.PoissonNextSendInbound(fuzzed_data_provider.ConsumeIntegral<int32_t>(), fuzzed_data_provider.ConsumeIntegral<int>());
(void)connman.PoissonNextSendInbound(
std::chrono::microseconds{fuzzed_data_provider.ConsumeIntegral<int32_t>()},
std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<int>()});
},
[&] {
CSerializedNetMsg serialized_net_msg;

@ -21,17 +21,17 @@ FUZZ_TARGET(node_eviction)
std::vector<NodeEvictionCandidate> eviction_candidates;
while (fuzzed_data_provider.ConsumeBool()) {
eviction_candidates.push_back({
fuzzed_data_provider.ConsumeIntegral<NodeId>(),
fuzzed_data_provider.ConsumeIntegral<int64_t>(),
fuzzed_data_provider.ConsumeIntegral<int64_t>(),
fuzzed_data_provider.ConsumeIntegral<int64_t>(),
fuzzed_data_provider.ConsumeIntegral<int64_t>(),
fuzzed_data_provider.ConsumeBool(),
fuzzed_data_provider.ConsumeBool(),
fuzzed_data_provider.ConsumeBool(),
fuzzed_data_provider.ConsumeIntegral<uint64_t>(),
fuzzed_data_provider.ConsumeBool(),
fuzzed_data_provider.ConsumeBool(),
/* id */ fuzzed_data_provider.ConsumeIntegral<NodeId>(),
/* nTimeConnected */ fuzzed_data_provider.ConsumeIntegral<int64_t>(),
/* m_min_ping_time */ std::chrono::microseconds{fuzzed_data_provider.ConsumeIntegral<int64_t>()},
/* nLastBlockTime */ fuzzed_data_provider.ConsumeIntegral<int64_t>(),
/* nLastTXTime */ fuzzed_data_provider.ConsumeIntegral<int64_t>(),
/* fRelevantServices */ fuzzed_data_provider.ConsumeBool(),
/* fRelayTxes */ fuzzed_data_provider.ConsumeBool(),
/* fBloomFilter */ fuzzed_data_provider.ConsumeBool(),
/* nKeyedNetGroup */ fuzzed_data_provider.ConsumeIntegral<uint64_t>(),
/* prefer_evict */ fuzzed_data_provider.ConsumeBool(),
/* m_is_local */ fuzzed_data_provider.ConsumeBool(),
});
}
// Make a copy since eviction_candidates may be in some valid but otherwise

@ -803,21 +803,6 @@ BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle)
BOOST_CHECK_EQUAL(IsLocal(addr), false);
}
BOOST_AUTO_TEST_CASE(PoissonNextSend)
{
g_mock_deterministic_tests = true;
int64_t now = 5000;
int average_interval_seconds = 600;
auto poisson = ::PoissonNextSend(now, average_interval_seconds);
std::chrono::microseconds poisson_chrono = ::PoissonNextSend(std::chrono::microseconds{now}, std::chrono::seconds{average_interval_seconds});
BOOST_CHECK_EQUAL(poisson, poisson_chrono.count());
g_mock_deterministic_tests = false;
}
std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_candidates, FastRandomContext& random_context)
{
std::vector<NodeEvictionCandidate> candidates;
@ -825,7 +810,7 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_c
candidates.push_back({
/* id */ id,
/* nTimeConnected */ static_cast<int64_t>(random_context.randrange(100)),
/* m_min_ping_time */ static_cast<int64_t>(random_context.randrange(100)),
/* m_min_ping_time */ std::chrono::microseconds{random_context.randrange(100)},
/* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)),
/* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)),
/* fRelevantServices */ random_context.randbool(),
@ -885,7 +870,7 @@ BOOST_AUTO_TEST_CASE(node_eviction_test)
// from eviction.
BOOST_CHECK(!IsEvicted(
number_of_nodes, [](NodeEvictionCandidate& candidate) {
candidate.m_min_ping_time = candidate.id;
candidate.m_min_ping_time = std::chrono::microseconds{candidate.id};
},
{0, 1, 2, 3, 4, 5, 6, 7}, random_context));
@ -931,10 +916,10 @@ BOOST_AUTO_TEST_CASE(node_eviction_test)
// Combination of all tests above.
BOOST_CHECK(!IsEvicted(
number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) {
candidate.nKeyedNetGroup = number_of_nodes - candidate.id; // 4 protected
candidate.m_min_ping_time = candidate.id; // 8 protected
candidate.nLastTXTime = number_of_nodes - candidate.id; // 4 protected
candidate.nLastBlockTime = number_of_nodes - candidate.id; // 4 protected
candidate.nKeyedNetGroup = number_of_nodes - candidate.id; // 4 protected
candidate.m_min_ping_time = std::chrono::microseconds{candidate.id}; // 8 protected
candidate.nLastTXTime = number_of_nodes - candidate.id; // 4 protected
candidate.nLastBlockTime = number_of_nodes - candidate.id; // 4 protected
},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, random_context));

@ -26,9 +26,16 @@ void UninterruptibleSleep(const std::chrono::microseconds& n);
* This helper is used to convert durations before passing them over an
* interface that doesn't support std::chrono (e.g. RPC, debug log, or the GUI)
*/
inline int64_t count_seconds(std::chrono::seconds t) { return t.count(); }
inline int64_t count_milliseconds(std::chrono::milliseconds t) { return t.count(); }
inline int64_t count_microseconds(std::chrono::microseconds t) { return t.count(); }
constexpr int64_t count_seconds(std::chrono::seconds t) { return t.count(); }
constexpr int64_t count_milliseconds(std::chrono::milliseconds t) { return t.count(); }
constexpr int64_t count_microseconds(std::chrono::microseconds t) { return t.count(); }
using SecondsDouble = std::chrono::duration<double, std::chrono::seconds::period>;
/**
* Helper to count the seconds in any std::chrono::duration type
*/
inline double CountSecondsDouble(SecondsDouble t) { return t.count(); }
/**
* DEPRECATED
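
As a small usage sketch of the new `SecondsDouble`/`CountSecondsDouble` helper (assuming the `util/time.h` shown above is on the include path; the ping value is made up):

```cpp
#include <util/time.h>

#include <cassert>
#include <chrono>

using namespace std::chrono_literals;

int main()
{
    // A 1'234'567 us ping is reported as 1.234567 fractional seconds, which is
    // what getpeerinfo now pushes for "pingtime", "minping" and "pingwait".
    const std::chrono::microseconds ping = 1234567us;
    assert(CountSecondsDouble(ping) == 1234567 / 1e6);
    return 0;
}
```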
