Use CScheduler for net's DumpAddresses

Instead of starting Yet Another Thread to dump addresses,
use CScheduler to do it.
pull/5964/head
Gavin Andresen 10 years ago
parent ddd0acd3db
commit 9a1dcea2df
GPG Key ID: 7588242FBE38D3A8

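For orientation: the CScheduler& that AppInit2 receives (visible in the first hunk below) is drained by a single service thread, so a job registered with scheduleEvery does not get a thread of its own. A minimal sketch of that wiring, assuming the CScheduler API from scheduler.h (serviceQueue and the Function typedef) and the TraceThread wrapper from util.h; StartSchedulerThread is an illustrative name, not code from this commit:

    #include "scheduler.h"
    #include "util.h"             // TraceThread
    #include <boost/bind.hpp>
    #include <boost/thread.hpp>

    // Illustrative wrapper: start the one thread that runs every scheduled task.
    void StartSchedulerThread(boost::thread_group& threadGroup, CScheduler& scheduler)
    {
        // Bind the scheduler's queue-draining loop into a callable first, then hand it
        // to TraceThread so it gets the usual thread naming/logging/interrupt handling.
        CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler);
        threadGroup.create_thread(boost::bind(&TraceThread<CScheduler::Function>, "scheduler", serviceLoop));
    }
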
@@ -1378,7 +1378,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
     LogPrintf("mapAddressBook.size() = %u\n", pwalletMain ? pwalletMain->mapAddressBook.size() : 0);
 #endif
-    StartNode(threadGroup);
+    StartNode(threadGroup, scheduler);
 #ifdef ENABLE_WALLET
     // Generate coins in the background

@@ -13,6 +13,7 @@
 #include "chainparams.h"
 #include "clientversion.h"
 #include "primitives/transaction.h"
+#include "scheduler.h"
 #include "ui_interface.h"
 #include "crypto/common.h"
@@ -1590,7 +1591,7 @@ void static Discover(boost::thread_group& threadGroup)
 #endif
 }
 
-void StartNode(boost::thread_group& threadGroup)
+void StartNode(boost::thread_group& threadGroup, CScheduler& scheduler)
 {
     uiInterface.InitMessage(_("Loading addresses..."));
     // Load addresses for peers.dat
@@ -1640,7 +1641,7 @@ void StartNode(boost::thread_group& threadGroup)
     threadGroup.create_thread(boost::bind(&TraceThread<void (*)()>, "msghand", &ThreadMessageHandler));
 
     // Dump network addresses
-    threadGroup.create_thread(boost::bind(&LoopForever<void (*)()>, "dumpaddr", &DumpAddresses, DUMP_ADDRESSES_INTERVAL * 1000));
+    scheduler.scheduleEvery(&DumpAddresses, DUMP_ADDRESSES_INTERVAL);
 }
 
 bool StopNode()

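Note the interval argument in the hunk just above: LoopForever slept for a number of milliseconds (hence DUMP_ADDRESSES_INTERVAL * 1000), whereas scheduleEvery takes its period in seconds, which is why the bare constant is now passed (900 seconds in net.cpp, matching the 900000 ms used in the old example comment removed from util.h below). Roughly, the part of the CScheduler interface being relied on here (paraphrased from scheduler.h, not part of this diff):

    class CScheduler {
    public:
        typedef boost::function<void(void)> Function;
        // Call f repeatedly, deltaSeconds apart, on the thread running serviceQueue().
        void scheduleEvery(Function f, int64_t deltaSeconds);
        // Drain and run scheduled tasks; meant to be run in its own thread.
        void serviceQueue();
    };
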
@@ -32,6 +32,7 @@
 class CAddrMan;
 class CBlockIndex;
+class CScheduler;
 class CNode;
 
 namespace boost {
@@ -72,7 +73,7 @@ bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOu
 void MapPort(bool fUseUPnP);
 unsigned short GetListenPort();
 bool BindListenPort(const CService &bindAddr, std::string& strError, bool fWhitelisted = false);
-void StartNode(boost::thread_group& threadGroup);
+void StartNode(boost::thread_group& threadGroup, CScheduler& scheduler);
 bool StopNode();
 void SocketSendData(CNode *pnode);

@@ -202,43 +202,6 @@ std::string HelpMessageOpt(const std::string& option, const std::string& message
 void SetThreadPriority(int nPriority);
 void RenameThread(const char* name);
 
-/**
- * Standard wrapper for do-something-forever thread functions.
- * "Forever" really means until the thread is interrupted.
- * Use it like:
- *     new boost::thread(boost::bind(&LoopForever<void (*)()>, "dumpaddr", &DumpAddresses, 900000));
- * or maybe:
- *     boost::function<void()> f = boost::bind(&FunctionWithArg, argument);
- *     threadGroup.create_thread(boost::bind(&LoopForever<boost::function<void()> >, "nothing", f, milliseconds));
- */
-template <typename Callable> void LoopForever(const char* name, Callable func, int64_t msecs)
-{
-    std::string s = strprintf("bitcoin-%s", name);
-    RenameThread(s.c_str());
-    LogPrintf("%s thread start\n", name);
-    try
-    {
-        while (1)
-        {
-            MilliSleep(msecs);
-            func();
-        }
-    }
-    catch (const boost::thread_interrupted&)
-    {
-        LogPrintf("%s thread stop\n", name);
-        throw;
-    }
-    catch (const std::exception& e) {
-        PrintExceptionContinue(&e, name);
-        throw;
-    }
-    catch (...) {
-        PrintExceptionContinue(NULL, name);
-        throw;
-    }
-}
-
 /**
  * .. and a wrapper that just calls func once
  */

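With LoopForever removed, a periodic job no longer carries its own sleep loop and catch blocks; that boilerplate lives once around the scheduler's service thread (via the surviving "wrapper that just calls func once" above), and the callbacks themselves stay plain functions. As a hedged sketch of what adding another periodic task looks like under this scheme (FlushExample is a made-up function, not code from this commit):

    // Hypothetical periodic task: no dedicated thread, no try/catch, no sleep loop.
    static void FlushExample()
    {
        LogPrint("example", "periodic flush\n");
    }

    // Registered once at startup, alongside DumpAddresses:
    scheduler.scheduleEvery(&FlushExample, 60 * 60); // once an hour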