@@ -428,21 +428,134 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt
    return CheckInputs(tx, state, view, flags, cacheSigStore, true, txdata);
}
/**
 * @param[out] coins_to_uncache Return any outpoints which were not previously present in the
 *                              coins cache, but were added as a result of validating the tx
 *                              for mempool acceptance. This allows the caller to optionally
 *                              remove the cache additions if the associated transaction ends
 *                              up being rejected by the mempool.
 */
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
                                     bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                                     bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
namespace {
class MemPoolAccept
{
    const CTransaction& tx = *ptx;
    const uint256 hash = tx.GetHash();
    AssertLockHeld(cs_main);
    LOCK(pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
public:
    MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
        m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
        m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000),
        m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
        m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000) {}
    // We put the arguments we're handed into a struct, so we can pass them
    // around easier.
    struct ATMPArgs {
        const CChainParams& m_chainparams;
        CValidationState& m_state;
        bool* m_missing_inputs;
        const int64_t m_accept_time;
        std::list<CTransactionRef>* m_replaced_transactions;
        const bool m_bypass_limits;
        const CAmount& m_absurd_fee;
        /*
         * Return any outpoints which were not previously present in the coins
         * cache, but were added as a result of validating the tx for mempool
         * acceptance. This allows the caller to optionally remove the cache
         * additions if the associated transaction ends up being rejected by
         * the mempool.
         */
        std::vector<COutPoint>& m_coins_to_uncache;
        const bool m_test_accept;
    };
    // Single transaction acceptance
    bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
private:
    // All the intermediate state that gets passed between the various levels
    // of checking a given transaction.
    struct Workspace {
        Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
        std::set<uint256> m_conflicts;
        CTxMemPool::setEntries m_all_conflicting;
        CTxMemPool::setEntries m_ancestors;
        std::unique_ptr<CTxMemPoolEntry> m_entry;
        bool m_replacement_transaction;
        CAmount m_modified_fees;
        CAmount m_conflicting_fees;
        size_t m_conflicting_size;
        const CTransactionRef& m_ptx;
        const uint256& m_hash;
    };
    // Run the policy checks on a given transaction, excluding any script checks.
    // Looks up inputs, calculates feerate, considers replacement, evaluates
    // package limits, etc. As this function can be invoked for "free" by a peer,
    // only tests that are fast should be done here (to avoid CPU DoS).
    bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
    // Run the script checks using our policy flags. As this can be slow, we should
    // only invoke this on transactions that have otherwise passed policy checks.
    bool PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    // Re-run the script checks, using consensus flags, and try to cache the
    // result in the scriptcache. This should be done after
    // PolicyScriptChecks(). This requires that all inputs either be in our
    // utxo set or in the mempool.
    bool ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    // Try to add the transaction to the mempool, removing any conflicts first.
    // Returns true if the transaction is in the mempool after any size
    // limiting is performed, false otherwise.
    bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
    // Compare a package's feerate against minimum allowed.
    bool CheckFeeRate(size_t package_size, CAmount package_fee, CValidationState& state)
    {
        CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
        if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
            return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
        }
        if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
            return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
        }
        return true;
    }
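    // Worked example of the two checks above (illustrative numbers only, not
    // part of this change): with ::minRelayTxFee at its usual 1000 sat/kvB
    // default, a 250-vbyte package needs package_fee >=
    // ::minRelayTxFee.GetFee(250) = 250 sats. If the mempool has grown past
    // -maxmempool and GetMinFee() has risen to, say, 5000 sat/kvB, the same
    // package instead needs at least GetFee(250) = 1250 sats to clear the
    // first check.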
private:
    CTxMemPool& m_pool;
    CCoinsViewCache m_view;
    CCoinsViewMemPool m_viewmempool;
    CCoinsView m_dummy;
    // The package limits in effect at the time of invocation.
    const size_t m_limit_ancestors;
    const size_t m_limit_ancestor_size;
    // These may be modified while evaluating a transaction (eg to account for
    // in-mempool conflicts; see below).
    size_t m_limit_descendants;
    size_t m_limit_descendant_size;
};
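// How the class above is meant to be driven (a sketch that mirrors
// AcceptToMemoryPoolWithTime() further down; the names are taken from there):
//   MemPoolAccept::ATMPArgs args { chainparams, state, pfMissingInputs, nAcceptTime,
//                                  plTxnReplaced, bypass_limits, nAbsurdFee,
//                                  coins_to_uncache, test_accept };
//   bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
// AcceptSingleTransaction() then runs PreChecks(), PolicyScriptChecks(),
// ConsensusScriptChecks() and Finalize() in that order, stopping at the first
// failure (and after ConsensusScriptChecks() when m_test_accept is set).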
bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
{
    const CTransactionRef& ptx = ws.m_ptx;
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;
    // Copy/alias what we need out of args
    CValidationState& state = args.m_state;
    bool* pfMissingInputs = args.m_missing_inputs;
    const int64_t nAcceptTime = args.m_accept_time;
    const bool bypass_limits = args.m_bypass_limits;
    const CAmount& nAbsurdFee = args.m_absurd_fee;
    std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
    // Alias what we need out of ws
    std::set<uint256>& setConflicts = ws.m_conflicts;
    CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
    CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
    std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
    bool& fReplacementTransaction = ws.m_replacement_transaction;
    CAmount& nModifiedFees = ws.m_modified_fees;
    CAmount& nConflictingFees = ws.m_conflicting_fees;
    size_t& nConflictingSize = ws.m_conflicting_size;
    if (pfMissingInputs) {
        *pfMissingInputs = false;
    }
@@ -472,15 +585,14 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
        return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-final");
    // is it already in the memory pool?
    if (pool.exists(hash)) {
    if (m_pool.exists(hash)) {
        return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-in-mempool");
    }
    // Check for conflicts with in-memory transactions
    std::set<uint256> setConflicts;
    for (const CTxIn& txin : tx.vin)
    {
        const CTransaction* ptxConflicting = pool.GetConflictTx(txin.prevout);
        const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
        if (ptxConflicting) {
            if (!setConflicts.count(ptxConflicting->GetHash()))
            {
    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);
        LockPoints lp;
        CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
        CCoinsViewMemPool viewMemPool(&coins_cache, pool);
        view.SetBackend(viewMemPool);
        // do all inputs exist?
        for (const CTxIn& txin : tx.vin) {
            if (!coins_cache.HaveCoinInCache(txin.prevout)) {
                coins_to_uncache.push_back(txin.prevout);
            }
    LockPoints lp;
    m_view.SetBackend(m_viewmempool);
            // Note: this call may add txin.prevout to the coins cache
            // (CoinsTip().cacheCoins) by way of FetchCoin(). It should be removed
            // later (via coins_to_uncache) if this tx turns out to be invalid.
            if (!view.HaveCoin(txin.prevout)) {
                // Are inputs missing because we already have the tx?
                for (size_t out = 0; out < tx.vout.size(); out++) {
                    // Optimistically just do efficient check of cache for outputs
                    if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
                        return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
                    }
                }
                // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
                if (pfMissingInputs) {
                    *pfMissingInputs = true;
    CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
    // do all inputs exist?
    for (const CTxIn& txin : tx.vin) {
        if (!coins_cache.HaveCoinInCache(txin.prevout)) {
            coins_to_uncache.push_back(txin.prevout);
        }
        // Note: this call may add txin.prevout to the coins cache
        // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
        // later (via coins_to_uncache) if this tx turns out to be invalid.
        if (!m_view.HaveCoin(txin.prevout)) {
            // Are inputs missing because we already have the tx?
            for (size_t out = 0; out < tx.vout.size(); out++) {
                // Optimistically just do efficient check of cache for outputs
                if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
                    return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
                }
                return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
            }
            // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
            if (pfMissingInputs) {
                *pfMissingInputs = true;
            }
            return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
        }
    }
    // Bring the best block into scope
    view.GetBestBlock();
    // Bring the best block into scope
    m_view.GetBestBlock();
    // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
    view.SetBackend(dummy);
    // we have all inputs cached now, so switch back to dummy (to protect
    // against bugs where we pull more inputs from disk that miss being added
    // to coins_to_uncache)
    m_view.SetBackend(m_dummy);
    // Only accept BIP68 sequence locked transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
    // CoinsViewCache instead of create its own
    if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
        return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
    // Only accept BIP68 sequence locked transactions that can be mined in the next
    // block; we don't want our mempool filled up with transactions that can't
    // be mined yet.
    // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
    // CoinsViewCache instead of create its own
    if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
        return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
    CAmount nFees = 0;
    if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
        return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
    }
    CAmount nFees = 0;
    if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
        return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
    }
    // Check for non-standard pay-to-script-hash in inputs
    if (fRequireStandard && !AreInputsStandard(tx, view))
        return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
    // Check for non-standard pay-to-script-hash in inputs
    if (fRequireStandard && !AreInputsStandard(tx, m_view))
        return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
    // Check for non-standard witness in P2WSH
    if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
        return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
    // Check for non-standard witness in P2WSH
    if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
        return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
    int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
    int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
    // nModifiedFees includes any fee deltas from PrioritiseTransaction
    CAmount nModifiedFees = nFees;
    pool.ApplyDelta(hash, nModifiedFees);
    // nModifiedFees includes any fee deltas from PrioritiseTransaction
    nModifiedFees = nFees;
    m_pool.ApplyDelta(hash, nModifiedFees);
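    // Illustrative example of the fee delta above (not part of this change):
    // a transaction whose inputs minus outputs pay 1000 sats, after a bump via
    // the prioritisetransaction RPC, e.g.
    //   bitcoin-cli prioritisetransaction "<txid>" 0.0 10000
    // is treated by the policy checks below as if it paid
    // nModifiedFees = 11000 sats, while nFees itself stays at 1000.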
    // Keep track of transactions that spend a coinbase, which we re-scan
    // during reorgs to ensure COINBASE_MATURITY is still met.
    bool fSpendsCoinbase = false;
    for (const CTxIn& txin : tx.vin) {
        const Coin& coin = view.AccessCoin(txin.prevout);
        if (coin.IsCoinBase()) {
            fSpendsCoinbase = true;
            break;
        }
    // Keep track of transactions that spend a coinbase, which we re-scan
    // during reorgs to ensure COINBASE_MATURITY is still met.
    bool fSpendsCoinbase = false;
    for (const CTxIn& txin : tx.vin) {
        const Coin& coin = m_view.AccessCoin(txin.prevout);
        if (coin.IsCoinBase()) {
            fSpendsCoinbase = true;
            break;
        }
    }
    CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
                          fSpendsCoinbase, nSigOpsCost, lp);
    unsigned int nSize = entry.GetTxSize();
    entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
                                    fSpendsCoinbase, nSigOpsCost, lp));
    unsigned int nSize = entry->GetTxSize();
    if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
        return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
    if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
        return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
            strprintf("%d", nSigOpsCost));
    CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
    if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
        return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
    }
    // No transactions are allowed below minRelayTxFee except from disconnected blocks
    if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
        return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
    }
    // No transactions are allowed below minRelayTxFee except from disconnected
    // blocks
    if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
    if (nAbsurdFee && nFees > nAbsurdFee)
        return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
    if (nAbsurdFee && nFees > nAbsurdFee)
        return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
            REJECT_HIGHFEE, "absurdly-high-fee",
            strprintf("%d > %d", nFees, nAbsurdFee));
    const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts);
    // Calculate in-mempool ancestors, up to a limit.
    CTxMemPool::setEntries setAncestors;
    size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
    size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
    size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
    size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000;
    if (setConflicts.size() == 1) {
        // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
        // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
        // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
        // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
        // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
        // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
        // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
        // for off-chain contract systems (see link in the comment below).
        //
        // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
        // conflict directly with exactly one other transaction (but may evict children of said transaction),
        // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
        // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
        // amended, we may need to move that check to here instead of removing it wholesale.
        //
        // Such transactions are clearly not merging any existing packages, so we are only concerned with
        // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
        // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
        // to.
        //
        // To check these we first check if we meet the RBF criteria, above, and increment the descendant
        // limits by the direct conflict and its descendants (as these are recalculated in
        // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
        // removals, of each parent's existing dependant set). The ancestor count limits are unmodified (as
        // the ancestor limits should be the same for both our new transaction and any conflicts).
        // We don't bother incrementing nLimitDescendants by the full removal count as that limit never comes
        // into force here (as we're only adding a single transaction).
        assert(setIterConflicting.size() == 1);
        CTxMemPool::txiter conflict = *setIterConflicting.begin();
        nLimitDescendants += 1;
        nLimitDescendantSize += conflict->GetSizeWithDescendants();
    }
    std::string errString;
    if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
        setAncestors.clear();
        // If CalculateMemPoolAncestors fails second time, we want the original error string.
        std::string dummy_err_string;
        // Contracting/payment channels CPFP carve-out:
        // If the new transaction is relatively small (up to 40k weight)
        // and has at most one ancestor (ie ancestor limit of 2, including
        // the new transaction), allow it if its parent has exactly the
        // descendant limit descendants.
        //
        // This allows protocols which rely on distrusting counterparties
        // being able to broadcast descendants of an unconfirmed transaction
        // to be secure by simply only having two immediately-spendable
        // outputs - one for each counterparty. For more info on the uses for
        // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
        if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
            !pool.CalculateMemPoolAncestors(entry, setAncestors, 2, nLimitAncestorSize, nLimitDescendants + 1, nLimitDescendantSize + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
            return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
        }
    }
    // A transaction that spends outputs that would be replaced by it is invalid. Now
    // that we have the set of all ancestors we can detect this
    // pathological case by making sure setConflicts and setAncestors don't
    // intersect.
    for (CTxMemPool::txiter ancestorIt : setAncestors)
    const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
    // Calculate in-mempool ancestors, up to a limit.
    if (setConflicts.size() == 1) {
        // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
        // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
        // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
        // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
        // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
        // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
        // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
        // for off-chain contract systems (see link in the comment below).
        //
        // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
        // conflict directly with exactly one other transaction (but may evict children of said transaction),
        // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
        // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
        // amended, we may need to move that check to here instead of removing it wholesale.
        //
        // Such transactions are clearly not merging any existing packages, so we are only concerned with
        // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
        // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
        // to.
        //
        // To check these we first check if we meet the RBF criteria, above, and increment the descendant
        // limits by the direct conflict and its descendants (as these are recalculated in
        // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
        // removals, of each parent's existing dependant set). The ancestor count limits are unmodified (as
        // the ancestor limits should be the same for both our new transaction and any conflicts).
        // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
        // into force here (as we're only adding a single transaction).
        assert(setIterConflicting.size() == 1);
        CTxMemPool::txiter conflict = *setIterConflicting.begin();
        m_limit_descendants += 1;
        m_limit_descendant_size += conflict->GetSizeWithDescendants();
    }
    std::string errString;
    if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
        setAncestors.clear();
        // If CalculateMemPoolAncestors fails second time, we want the original error string.
        std::string dummy_err_string;
        // Contracting/payment channels CPFP carve-out:
        // If the new transaction is relatively small (up to 40k weight)
        // and has at most one ancestor (ie ancestor limit of 2, including
        // the new transaction), allow it if its parent has exactly the
        // descendant limit descendants.
        //
        // This allows protocols which rely on distrusting counterparties
        // being able to broadcast descendants of an unconfirmed transaction
        // to be secure by simply only having two immediately-spendable
        // outputs - one for each counterparty. For more info on the uses for
        // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
        if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
            !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
            return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
        }
    }
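    // Illustrative numbers for the two carve-outs above (usual defaults, not
    // part of this change): with -limitdescendantcount=25 and
    // -limitdescendantsize=101 (kB), a replacement that directly conflicts
    // with exactly one mempool transaction whose GetSizeWithDescendants() is
    // 30,000 bytes is evaluated against limits of 26 descendants / 131,000
    // bytes, so it is not rejected merely because the package it would evict
    // already fills the limits. Separately, the CPFP carve-out retry admits a
    // transaction of at most EXTRA_DESCENDANT_TX_SIZE_LIMIT (10,000 vbytes)
    // with at most one unconfirmed ancestor even when that parent already has
    // the maximum number of in-mempool descendants.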
    // A transaction that spends outputs that would be replaced by it is invalid. Now
    // that we have the set of all ancestors we can detect this
    // pathological case by making sure setConflicts and setAncestors don't
    // intersect.
    for (CTxMemPool::txiter ancestorIt : setAncestors)
    {
        const uint256& hashAncestor = ancestorIt->GetTx().GetHash();
        if (setConflicts.count(hashAncestor))
        {
        const uint256& hashAncestor = ancestorIt->GetTx().GetHash();
        if (setConflicts.count(hashAncestor))
        {
            return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
                strprintf("%s spends conflicting transaction %s",
                    hash.ToString(),
                    hashAncestor.ToString()));
        }
            return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
                strprintf("%s spends conflicting transaction %s",
                    hash.ToString(),
                    hashAncestor.ToString()));
        }
    }
    // Check if it's economically rational to mine this transaction rather
    // than the ones it replaces.
    CAmount nConflictingFees = 0;
    size_t nConflictingSize = 0;
    uint64_t nConflictingCount = 0;
    CTxMemPool::setEntries allConflicting;
    // If we don't hold the lock allConflicting might be incomplete; the
    // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
    // mempool consistency for us.
    const bool fReplacementTransaction = setConflicts.size();
    if (fReplacementTransaction)
    {
        CFeeRate newFeeRate(nModifiedFees, nSize);
        std::set<uint256> setConflictsParents;
        const int maxDescendantsToVisit = 100;
        for (const auto& mi : setIterConflicting) {
            // Don't allow the replacement to reduce the feerate of the
            // mempool.
            //
            // We usually don't want to accept replacements with lower
            // feerates than what they replaced as that would lower the
            // feerate of the next block. Requiring that the feerate always
            // be increased is also an easy-to-reason about way to prevent
            // DoS attacks via replacements.
            //
            // We only consider the feerates of transactions being directly
            // replaced, not their indirect descendants. While that does
            // mean high feerate children are ignored when deciding whether
            // or not to replace, we do require the replacement to pay more
            // overall fees too, mitigating most cases.
            CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
            if (newFeeRate <= oldFeeRate)
            {
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
                    strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                        hash.ToString(),
                        newFeeRate.ToString(),
                        oldFeeRate.ToString()));
            }
            for (const CTxIn& txin : mi->GetTx().vin)
            {
                setConflictsParents.insert(txin.prevout.hash);
            }
    // Check if it's economically rational to mine this transaction rather
    // than the ones it replaces.
    nConflictingFees = 0;
    nConflictingSize = 0;
    uint64_t nConflictingCount = 0;
            nConflictingCount += mi->GetCountWithDescendants();
        }
        // This potentially overestimates the number of actual descendants
        // but we just want to be conservative to avoid doing too much
        // work.
        if (nConflictingCount <= maxDescendantsToVisit) {
            // If not too many to replace, then calculate the set of
            // transactions that would have to be evicted
            for (CTxMemPool::txiter it : setIterConflicting) {
                pool.CalculateDescendants(it, allConflicting);
            }
            for (CTxMemPool::txiter it : allConflicting) {
                nConflictingFees += it->GetModifiedFee();
                nConflictingSize += it->GetTxSize();
            }
        } else {
            return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
                strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
    // If we don't hold the lock allConflicting might be incomplete; the
    // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
    // mempool consistency for us.
    fReplacementTransaction = setConflicts.size();
    if (fReplacementTransaction)
    {
        CFeeRate newFeeRate(nModifiedFees, nSize);
        std::set<uint256> setConflictsParents;
        const int maxDescendantsToVisit = 100;
        for (const auto& mi : setIterConflicting) {
            // Don't allow the replacement to reduce the feerate of the
            // mempool.
            //
            // We usually don't want to accept replacements with lower
            // feerates than what they replaced as that would lower the
            // feerate of the next block. Requiring that the feerate always
            // be increased is also an easy-to-reason about way to prevent
            // DoS attacks via replacements.
            //
            // We only consider the feerates of transactions being directly
            // replaced, not their indirect descendants. While that does
            // mean high feerate children are ignored when deciding whether
            // or not to replace, we do require the replacement to pay more
            // overall fees too, mitigating most cases.
            CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
            if (newFeeRate <= oldFeeRate)
            {
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
                    strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
                        hash.ToString(),
                        nConflictingCount,
                        maxDescendantsToVisit));
                        newFeeRate.ToString(),
                        oldFeeRate.ToString()));
}
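            // Illustrative numbers for the feerate rule above (not part of
            // this change): replacing a 500-vbyte conflict that pays 1000 sats
            // (CFeeRate(1000, 500) = 2000 sat/kvB) with a 600-vbyte
            // transaction paying 1100 sats (CFeeRate(1100, 600) ~ 1833
            // sat/kvB) fails the newFeeRate <= oldFeeRate check even though
            // the absolute fee is higher; the replacement must also beat the
            // conflicts on total fees, which is checked further down.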
            for (unsigned int j = 0; j < tx.vin.size(); j++)
            for (const CTxIn& txin : mi->GetTx().vin)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                //
                // Note that if you relax this to make RBF a little more useful,
                // this may break the CalculateMempoolAncestors RBF relaxation,
                // above. See the comment above the first CalculateMempoolAncestors
                // call for more info.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (pool.exists(tx.vin[j].prevout.hash)) {
                        return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
                            strprintf("replacement %s adds unconfirmed input, idx %d",
                                hash.ToString(), j));
                    }
                }
                setConflictsParents.insert(txin.prevout.hash);
            }
            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
                    strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                        hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
                nConflictingCount += mi->GetCountWithDescendants();
            }
            // This potentially overestimates the number of actual descendants
            // but we just want to be conservative to avoid doing too much
            // work.
            if (nConflictingCount <= maxDescendantsToVisit) {
                // If not too many to replace, then calculate the set of
                // transactions that would have to be evicted
                for (CTxMemPool::txiter it : setIterConflicting) {
                    m_pool.CalculateDescendants(it, allConflicting);
                }
            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
            {
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
                    strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                        hash.ToString(),
                        FormatMoney(nDeltaFees),
                        FormatMoney(::incrementalRelayFee.GetFee(nSize))));
                for (CTxMemPool::txiter it : allConflicting) {
                    nConflictingFees += it->GetModifiedFee();
                    nConflictingSize += it->GetTxSize();
                }
            } else {
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
                    strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
                        hash.ToString(),
                        nConflictingCount,
                        maxDescendantsToVisit));
            }
        constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
        // Check against previous transactions
        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion denial-of-service attacks.
        PrecomputedTransactionData txdata(tx);
        if (!CheckInputs(tx, state, view, scriptVerifyFlags, true, false, txdata)) {
            // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
            // need to turn both off, and compare against just turning off CLEANSTACK
            // to see if the failure is specifically due to witness validation.
            CValidationState stateDummy; // Want reported failures to be from first CheckInputs
            if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
                !CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
                // Only the witness is missing, so the transaction itself may be fine.
                state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
                    state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
            for (unsigned int j = 0; j < tx.vin.size(); j++)
            {
                // We don't want to accept replacements that require low
                // feerate junk to be mined first. Ideally we'd keep track of
                // the ancestor feerates and make the decision based on that,
                // but for now requiring all new inputs to be confirmed works.
                //
                // Note that if you relax this to make RBF a little more useful,
                // this may break the CalculateMempoolAncestors RBF relaxation,
                // above. See the comment above the first CalculateMempoolAncestors
                // call for more info.
                if (!setConflictsParents.count(tx.vin[j].prevout.hash))
                {
                    // Rather than check the UTXO set - potentially expensive -
                    // it's cheaper to just check if the new input refers to a
                    // tx that's in the mempool.
                    if (m_pool.exists(tx.vin[j].prevout.hash)) {
                        return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
                            strprintf("replacement %s adds unconfirmed input, idx %d",
                                hash.ToString(), j));
                    }
                }
            assert(IsTransactionReason(state.GetReason()));
            return false; // state filled in by CheckInputs
        }
        // Check again against the current block tip's script verification
        // flags to cache our script execution flags. This is, of course,
        // useless if the next block has different script flags from the
        // previous one, but because the cache tracks script flags for us it
        // will auto-invalidate and we'll just have a few blocks of extra
        // misses on soft-fork activation.
        //
        // This is also useful in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks (using TestBlockValidity), however allowing such
        // transactions into the mempool can be exploited as a DoS attack.
        unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
        if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
            return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
                __func__, hash.ToString(), FormatStateMessage(state));
        }
        if (test_accept) {
            // Tx was accepted, but not added
            return true;
            // The replacement must pay greater fees than the transactions it
            // replaces - if we did the bandwidth used by those conflicting
            // transactions would not be paid for.
            if (nModifiedFees < nConflictingFees)
            {
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
                    strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
                        hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
            }
        // Remove conflicting transactions from the mempool
        for (CTxMemPool::txiter it : allConflicting)
            // Finally in addition to paying more fees than the conflicts the
            // new transaction must pay for its own bandwidth.
            CAmount nDeltaFees = nModifiedFees - nConflictingFees;
            if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
            {
                LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
                    it->GetTx().GetHash().ToString(),
                    hash.ToString(),
                    FormatMoney(nModifiedFees - nConflictingFees),
                    (int)nSize - (int)nConflictingSize);
                if (plTxnReplaced)
                    plTxnReplaced->push_back(it->GetSharedTx());
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
                    strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
                        hash.ToString(),
                        FormatMoney(nDeltaFees),
                        FormatMoney(::incrementalRelayFee.GetFee(nSize))));
            }
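            // Illustrative numbers for the two fee rules above (not part of
            // this change): if the conflicts to be evicted pay
            // nConflictingFees = 2000 sats and the 500-vbyte replacement pays
            // nModifiedFees = 2300 sats, the first check passes (2300 >= 2000)
            // but the second fails with the usual 1000 sat/kvB
            // ::incrementalRelayFee, since the 300-sat delta is below the
            // GetFee(500) = 500 sats needed to pay for the replacement's own
            // bandwidth.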
        pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
    }
    return true;
}
bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
{
    const CTransaction& tx = *ws.m_ptx;
        // This transaction should only count for fee estimation if:
        // - it isn't a BIP 125 replacement transaction (may not be widely supported)
        // - it's not being re-added during a reorg which bypasses typical mempool fee limits
        // - the node is not behind
        // - the transaction is not dependent on any other transactions in the mempool
        bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
    CValidationState& state = args.m_state;
        // Store transaction in memory
        pool.addUnchecked(entry, setAncestors, validForFeeEstimation);
    constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
        // trim mempool and check if tx was trimmed
        if (!bypass_limits) {
            LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
            if (!pool.exists(hash))
                return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
    // Check against previous transactions
    // This is done last to help prevent CPU exhaustion denial-of-service attacks.
    if (!CheckInputs(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
        // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
        // need to turn both off, and compare against just turning off CLEANSTACK
        // to see if the failure is specifically due to witness validation.
        CValidationState stateDummy; // Want reported failures to be from first CheckInputs
        if (!tx.HasWitness() && CheckInputs(tx, stateDummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
            !CheckInputs(tx, stateDummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
            // Only the witness is missing, so the transaction itself may be fine.
            state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
                state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
        }
        assert(IsTransactionReason(state.GetReason()));
        return false; // state filled in by CheckInputs
    }
    return true;
}
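// A note on the flag masking in PolicyScriptChecks() above: CLEANSTACK
// validation requires WITNESS validation, so the first retry drops both
// (SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK) while the second drops
// only SCRIPT_VERIFY_CLEANSTACK. If the first retry succeeds but the second
// still fails, the only flag separating the two runs is SCRIPT_VERIFY_WITNESS,
// so the failure is attributed to a missing or mutated witness
// (TX_WITNESS_MUTATED) rather than to an otherwise-invalid transaction.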
bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
{
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;
    CValidationState& state = args.m_state;
    const CChainParams& chainparams = args.m_chainparams;
    // Check again against the current block tip's script verification
    // flags to cache our script execution flags. This is, of course,
    // useless if the next block has different script flags from the
    // previous one, but because the cache tracks script flags for us it
    // will auto-invalidate and we'll just have a few blocks of extra
    // misses on soft-fork activation.
    //
    // This is also useful in case of bugs in the standard flags that cause
    // transactions to pass as valid when they're actually invalid. For
    // instance the STRICTENC flag was incorrectly allowing certain
    // CHECKSIG NOT scripts to pass, even though they were invalid.
    //
    // There is a similar check in CreateNewBlock() to prevent creating
    // invalid blocks (using TestBlockValidity), however allowing such
    // transactions into the mempool can be exploited as a DoS attack.
    unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
    if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, true, txdata)) {
        return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
            __func__, hash.ToString(), FormatStateMessage(state));
    }
    return true;
}
bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
{
    const CTransaction& tx = *ws.m_ptx;
    const uint256& hash = ws.m_hash;
    CValidationState& state = args.m_state;
    const bool bypass_limits = args.m_bypass_limits;
    CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
    CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
    const CAmount& nModifiedFees = ws.m_modified_fees;
    const CAmount& nConflictingFees = ws.m_conflicting_fees;
    const size_t& nConflictingSize = ws.m_conflicting_size;
    const bool fReplacementTransaction = ws.m_replacement_transaction;
    std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
    // Remove conflicting transactions from the mempool
    for (CTxMemPool::txiter it : allConflicting)
    {
        LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
            it->GetTx().GetHash().ToString(),
            hash.ToString(),
            FormatMoney(nModifiedFees - nConflictingFees),
            (int)entry->GetTxSize() - (int)nConflictingSize);
        if (args.m_replaced_transactions)
            args.m_replaced_transactions->push_back(it->GetSharedTx());
    }
    m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
    // This transaction should only count for fee estimation if:
    // - it isn't a BIP 125 replacement transaction (may not be widely supported)
    // - it's not being re-added during a reorg which bypasses typical mempool fee limits
    // - the node is not behind
    // - the transaction is not dependent on any other transactions in the mempool
    bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx);
    // Store transaction in memory
    m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
    // trim mempool and check if tx was trimmed
    if (!bypass_limits) {
        LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
        if (!m_pool.exists(hash))
            return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
    }
    return true;
}
bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
{
    AssertLockHeld(cs_main);
    LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
    Workspace workspace(ptx);
    if (!PreChecks(args, workspace)) return false;
    // Only compute the precomputed transaction data if we need to verify
    // scripts (ie, other policy checks pass). We perform the inexpensive
    // checks first and avoid hashing and signature verification unless those
    // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
    PrecomputedTransactionData txdata(*ptx);
    if (!PolicyScriptChecks(args, workspace, txdata)) return false;
    if (!ConsensusScriptChecks(args, workspace, txdata)) return false;
    // Tx was accepted, but not added
    if (args.m_test_accept) return true;
    if (!Finalize(args, workspace)) return false;
    GetMainSignals().TransactionAddedToMempool(ptx);
    return true;
}
} // anon namespace
/** (try to) add transaction to memory pool with a specified acceptance time **/
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& tx,
                                       bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
                                       bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    std::vector<COutPoint> coins_to_uncache;
    bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);
    MemPoolAccept::ATMPArgs args { chainparams, state, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept };
    bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
    if (!res) {
        // Remove coins that were not present in the coins cache before calling ATMPW;
        // this is to prevent memory DoS in case we receive a large number of