Commits (20, changes from all commits):
7d8d855 - eth: implemented peer consensus based witness verification (pratikspatil024, Sep 15, 2025)
fc81044 - eth/fetcher: update peer drop mechanism (pratikspatil024, Sep 25, 2025)
96ab269 - Merge branch 'develop' of https://github.com/maticnetwork/bor into ps… (pratikspatil024, Sep 25, 2025)
e94a428 - eth: added dynamic page limit for witness based on gas limit (pratikspatil024, Sep 30, 2025)
9515bd1 - eth: fix lint (pratikspatil024, Sep 30, 2025)
d327194 - eth: download witness within limit and block for verificatio (pratikspatil024, Sep 30, 2025)
8723b42 - eth: fix lint (pratikspatil024, Sep 30, 2025)
83ab606 - eth, core: added a check to fix a potential vulnerability and added t… (pratikspatil024, Oct 1, 2025)
3a6ab74 - eth, core: include the original peer in the consenses to get consense… (pratikspatil024, Oct 1, 2025)
439e927 - eth: implemented RequestWitnessPageCount (pratikspatil024, Oct 1, 2025)
e77b139 - eth: implemented wit/2 and added new message type to get the witness … (pratikspatil024, Oct 3, 2025)
a1703bb - Merge branch 'develop' of https://github.com/maticnetwork/bor into ps… (pratikspatil024, Oct 3, 2025)
9f1fd71 - eth: updated mock file (pratikspatil024, Oct 3, 2025)
d9f0371 - eth: removed code duplication (pratikspatil024, Oct 15, 2025)
ba40cb6 - eth: dropping peer immediately when page count verification fails (pratikspatil024, Oct 15, 2025)
5cb56b4 - eth: added more comments (pratikspatil024, Oct 15, 2025)
6b8d178 - Merge branch 'develop' of https://github.com/maticnetwork/bor into ps… (pratikspatil024, Oct 15, 2025)
fabf132 - Merge branch 'develop' of https://github.com/maticnetwork/bor into ps… (pratikspatil024, Oct 27, 2025)
bb5c8fe - Merge branch 'develop' of https://github.com/maticnetwork/bor into ps… (pratikspatil024, Oct 31, 2025)
2982663 - Merge branch 'develop' of https://github.com/maticnetwork/bor into ps… (pratikspatil024, Nov 6, 2025)
440 changes: 440 additions & 0 deletions core/stateless/witness_test.go

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions eth/backend.go
@@ -403,6 +403,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
EventMux: eth.eventMux,
RequiredBlocks: config.RequiredBlocks,
EthAPI: blockChainAPI,
gasCeil: config.Miner.GasCeil,
checker: checker,
enableBlockTracking: eth.config.EnableBlockTracking,
txAnnouncementOnly: eth.p2pServer.TxAnnouncementOnly,
8 changes: 7 additions & 1 deletion eth/fetcher/block_fetcher.go
@@ -249,7 +249,7 @@ type BlockFetcher struct {
}

// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn, enableBlockTracking bool, requireWitness bool) *BlockFetcher {
func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn, enableBlockTracking bool, requireWitness bool, gasCeil uint64) *BlockFetcher {
f := &BlockFetcher{
light: light,
notify: make(chan *blockAnnounce),
@@ -287,6 +287,7 @@ func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetr
f.getBlock,
f.getHeader,
f.chainHeight,
gasCeil,
)

return f
@@ -1297,3 +1298,8 @@ func (f *BlockFetcher) forgetBlock(hash common.Hash) {
f.wm.forget(hash)
}
}

// GetWitnessManager returns the witness manager for external access
func (f *BlockFetcher) GetWitnessManager() *witnessManager {
return f.wm
}
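The gasCeil parameter threaded through NewBlockFetcher above feeds the dynamic witness page threshold introduced in eth/fetcher/witness_manager.go further down in this diff. As a rough worked example using the constants defined there (an estimated 1M gas per MB of witness, a 15 MB page cap, and a default threshold of 10 pages): a 50M gas ceiling gives ceil(50 / 15) = 4 pages, a 30M ceiling gives ceil(30 / 15) = 2, and a ceiling of 0 (as passed in the tests below) falls back to the default of 10.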
6 changes: 6 additions & 0 deletions eth/fetcher/block_fetcher_race_test.go
@@ -52,6 +52,7 @@ func TestBlockFetcherConcurrentMapAccess(t *testing.T) {
dropPeer,
false, // no block tracking
false, // no witness requirement
0, // no gas ceiling
)

// Start the fetcher
@@ -249,6 +250,7 @@ func TestWitnessManagerConcurrentAccess(t *testing.T) {
getBlock,
getHeader,
chainHeight,
0,
)

// Start the witness manager
@@ -480,6 +482,7 @@ func TestBlockFetcherMapStateConsistency(t *testing.T) {
dropPeer,
false,
false,
0,
)

fetcher.Start()
@@ -538,6 +541,7 @@ func TestWitnessManagerStateConsistency(t *testing.T) {
getBlock,
getHeader,
chainHeight,
0,
)

block := createTestBlock(101)
@@ -596,6 +600,7 @@ func TestBlockFetcherMemoryLeaks(t *testing.T) {
dropPeer,
false,
false,
0,
)

fetcher.Start()
@@ -652,6 +657,7 @@ func TestWitnessManagerMemoryLeaks(t *testing.T) {
getBlock,
getHeader,
chainHeight,
0,
)

// Add and remove many entries to test cleanup
2 changes: 1 addition & 1 deletion eth/fetcher/block_fetcher_test.go
@@ -109,7 +109,7 @@ func newTester(light bool) *fetcherTester {
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer, false, false)
tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer, false, false, 0)
tester.fetcher.Start()

return tester
203 changes: 190 additions & 13 deletions eth/fetcher/witness_manager.go
@@ -30,6 +30,17 @@ const (

witnessCacheSize = 10
witnessCacheTTL = 2 * time.Minute

// Witness verification constants
witnessVerificationPeers = 2 // Number of random peers to query for verification
witnessVerificationTimeout = 5 * time.Second // Timeout for verification queries
witnessVerificationCacheTTL = 10 * time.Minute // Cache verification results for 10 minutes

// Witness size estimation constants
// Assuming 1M gas results in 1MB witness, and max page size is 15MB
gasPerMB = 1_000_000 // 1M gas per MB of witness
maxPageSizeMB = 15 // Maximum page size in MB
witnessPageThreshold = 10 // Default threshold if gas ceil not available
)

// witnessRequestState tracks the state of a pending witness request.
@@ -46,6 +57,13 @@ type cachedWitness struct {
timestamp time.Time
}

// witnessVerificationResult represents the result of verifying a witness page count
type witnessVerificationResult struct {
pageCount uint64
verified bool
timestamp time.Time
}

// witnessManager handles the logic specific to fetching and managing witnesses
// for blocks, isolating it from the main BlockFetcher loop.
type witnessManager struct {
@@ -62,6 +80,10 @@ type witnessManager struct {
witnessUnavailable map[common.Hash]time.Time // Tracks hashes whose witnesses are known to be unavailable, with expiry times.
witnessCache *ttlcache.Cache[common.Hash, *cachedWitness] // TTL cache of witnesses that arrived before their blocks

// Witness verification state
witnessVerificationCache *ttlcache.Cache[common.Hash, *witnessVerificationResult] // Cache of verified page counts
gasCeil uint64 // Gas ceiling for calculating dynamic page threshold

// Communication channels (owned by witnessManager)
injectNeedWitnessCh chan *injectBlockNeedWitnessMsg // Injected blocks needing witness fetch
injectWitnessCh chan *injectedWitnessMsg // Injected witnesses from broadcast
@@ -90,27 +112,36 @@ func newWitnessManager(
parentGetBlock blockRetrievalFn,
parentGetHeader HeaderRetrievalFn,
parentChainHeight chainHeightFn,
gasCeil uint64,
) *witnessManager {
// Create TTL cache for witnesses that arrive before their blocks (expires after witnessCacheTTL)
witnessCache := ttlcache.New[common.Hash, *cachedWitness](
ttlcache.WithTTL[common.Hash, *cachedWitness](witnessCacheTTL),
ttlcache.WithCapacity[common.Hash, *cachedWitness](witnessCacheSize),
)

// Create TTL cache for witness verification results
witnessVerificationCache := ttlcache.New[common.Hash, *witnessVerificationResult](
ttlcache.WithTTL[common.Hash, *witnessVerificationResult](witnessVerificationCacheTTL),
ttlcache.WithCapacity[common.Hash, *witnessVerificationResult](100), // Cache up to 100 verification results
)

m := &witnessManager{
parentQuit: parentQuit,
parentDropPeer: parentDropPeer,
parentEnqueueCh: parentEnqueueCh,
parentGetBlock: parentGetBlock,
parentGetHeader: parentGetHeader,
parentChainHeight: parentChainHeight,
pending: make(map[common.Hash]*witnessRequestState),
witnessUnavailable: make(map[common.Hash]time.Time),
witnessCache: witnessCache,
injectNeedWitnessCh: make(chan *injectBlockNeedWitnessMsg, 10),
injectWitnessCh: make(chan *injectedWitnessMsg, 10),
witnessTimer: time.NewTimer(0),
pokeCh: make(chan struct{}, 1),
parentQuit: parentQuit,
parentDropPeer: parentDropPeer,
parentEnqueueCh: parentEnqueueCh,
parentGetBlock: parentGetBlock,
parentGetHeader: parentGetHeader,
parentChainHeight: parentChainHeight,
pending: make(map[common.Hash]*witnessRequestState),
witnessUnavailable: make(map[common.Hash]time.Time),
witnessCache: witnessCache,
witnessVerificationCache: witnessVerificationCache,
gasCeil: gasCeil,
injectNeedWitnessCh: make(chan *injectBlockNeedWitnessMsg, 10),
injectWitnessCh: make(chan *injectedWitnessMsg, 10),
witnessTimer: time.NewTimer(0),
pokeCh: make(chan struct{}, 1),
}
// Clear the timer channel initially
if !m.witnessTimer.Stop() {
@@ -887,3 +918,149 @@ func (m *witnessManager) checkCompleting(announce *blockAnnounce, block *types.B
}

var ErrNoWitnessPeerAvailable = errors.New("no peer with witness available") // Define a potential specific error

// calculatePageThreshold calculates the dynamic page threshold based on gas ceiling
// Formula: ceil(gasCeil (in millions) / maxPageSizeMB)
// Example: 50M gas / 15MB per page = ceil(3.33) = 4 pages
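// If gasCeil is 0 (not configured), the static witnessPageThreshold default is returned instead.
// Note: gasCeil is first truncated to whole MB by integer division before the ceiling division.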
func (m *witnessManager) calculatePageThreshold() uint64 {
if m.gasCeil == 0 {
return witnessPageThreshold // Return default if gas ceil not set
}

// Convert gas ceil to millions and divide by max page size in MB using ceiling division
gasCeilMB := m.gasCeil / gasPerMB

// Ceiling division: (a + b - 1) / b
threshold := (gasCeilMB + maxPageSizeMB - 1) / maxPageSizeMB

// Ensure minimum threshold of 1 page
if threshold < 1 {
threshold = 1
}

log.Debug("[wm] Calculated dynamic page threshold", "gasCeil", m.gasCeil, "gasCeilMB", gasCeilMB, "threshold", threshold)
return threshold
}

// getConsensusPageCountWithOriginal gets consensus page count including the original peer
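// Example: if the original peer reports 12 pages and the two sampled peers both report 4, the vote map
// becomes {12: 1, 4: 2}; 4 wins with two votes, so 4 is returned and the caller treats the reporter as dishonest.
// If all three voters disagree, no value reaches two votes and 0 is returned (treated as no consensus).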
func (m *witnessManager) getConsensusPageCountWithOriginal(peers []string, hash common.Hash, originalPageCount uint64, getWitnessPageCount func(peer string, hash common.Hash) (uint64, error)) uint64 {
// Start with the original peer's count
countMap := make(map[uint64]int)
countMap[originalPageCount] = 1

// Query random peers and add their counts
for _, peer := range peers {
pageCount, err := getWitnessPageCount(peer, hash)
if err == nil {
countMap[pageCount]++
}
}

// Find the most common page count (majority vote)
var maxCount int
var consensusCount uint64
for count, freq := range countMap {
if freq > maxCount {
maxCount = freq
consensusCount = count
}
}

// Log the consensus result
log.Debug("[wm] Consensus calculation", "counts", countMap, "consensus", consensusCount, "maxVotes", maxCount)

// Only return consensus if we have majority (at least 2 out of 3)
if maxCount >= 2 {
return consensusCount
}

// No clear majority, return 0 (will be treated as no consensus)
return 0
}

// cacheVerificationResult caches a successful verification result
func (m *witnessManager) cacheVerificationResult(hash common.Hash, pageCount uint64) {
m.witnessVerificationCache.Set(hash, &witnessVerificationResult{
pageCount: pageCount,
verified: true,
timestamp: time.Now(),
}, witnessVerificationCacheTTL)
}

// CheckWitnessPageCount checks if a witness page count should trigger verification
// Returns true if peer is honest (or under threshold), false if peer should be dropped
//
// Optimization: This function receives getRandomPeers and getWitnessPageCount as function
// references (not called yet). They are only executed if:
// 1. pageCount > threshold (line below)
// 2. Cache miss (in verifyWitnessPageCountSync)
// This avoids unnecessary peer queries in most cases (cache hits or small witnesses).
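// Example: with a 50M gas ceiling the threshold is 4, so a 3-page witness is accepted with no network
// traffic, while a 12-page witness is cross-checked against sampled peers via verifyWitnessPageCountSync.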
func (m *witnessManager) CheckWitnessPageCount(hash common.Hash, pageCount uint64, peer string, getRandomPeers func() []string, getWitnessPageCount func(peer string, hash common.Hash) (uint64, error)) bool {
// Calculate dynamic threshold based on gas ceiling
threshold := m.calculatePageThreshold()

// If page count is within threshold, no verification needed
// No peer queries are made in this case
if pageCount <= threshold {
log.Debug("[wm] Witness page count within threshold, no verification needed", "peer", peer, "pageCount", pageCount, "threshold", threshold)
return true
}

// Page count exceeds threshold - verify synchronously
// Note: Peer queries only happen after cache check in verifyWitnessPageCountSync
log.Debug("[wm] Witness page count exceeds threshold, running synchronous verification", "peer", peer, "hash", hash, "pageCount", pageCount, "threshold", threshold)
return m.verifyWitnessPageCountSync(hash, pageCount, peer, getRandomPeers, getWitnessPageCount)
}

// verifyWitnessPageCountSync verifies a witness page count synchronously and returns result
//
// Optimization flow:
// 1. Check cache first (below) - no peer queries if cache hit
// 2. Get random peers only if cache miss
// 3. Query peers for consensus only if we have enough peers
// This minimizes network overhead by avoiding redundant verifications.
func (m *witnessManager) verifyWitnessPageCountSync(hash common.Hash, reportedPageCount uint64, reportingPeer string, getRandomPeers func() []string, getWitnessPageCount func(peer string, hash common.Hash) (uint64, error)) bool {
// OPTIMIZATION: Check cache first before making any peer queries
// This avoids unnecessary network requests for recently verified witnesses
if cached := m.witnessVerificationCache.Get(hash); cached != nil {
if cached.Value().pageCount == reportedPageCount {
// Page count matches cached result, peer is honest
log.Debug("[wm] Cached verification result matches", "peer", reportingPeer, "pageCount", reportedPageCount)
return true
} else {
// Page count doesn't match cached result, peer is dishonest - drop immediately
log.Warn("Dropping dishonest peer - cached verification mismatch", "peer", reportingPeer, "reported", reportedPageCount, "cached", cached.Value().pageCount)
m.parentDropPeer(reportingPeer)
return false
}
}

// Cache miss - need to verify with other peers
// Now we call getRandomPeers() for the first time
randomPeers := getRandomPeers()
if len(randomPeers) < witnessVerificationPeers {
// Not enough peers for verification, assume honest (conservative approach)
log.Debug("[wm] Not enough peers for verification, assuming honest", "peer", reportingPeer, "availablePeers", len(randomPeers))
m.cacheVerificationResult(hash, reportedPageCount)
return true
}

// Select random peers for verification
selectedPeers := randomPeers[:witnessVerificationPeers]

// Query selected peers for page count and include original peer's count in consensus
consensusPageCount := m.getConsensusPageCountWithOriginal(selectedPeers, hash, reportedPageCount, getWitnessPageCount)

// Determine if original peer is honest based on majority consensus
if consensusPageCount != reportedPageCount && consensusPageCount != 0 {
// Peer is dishonest - drop immediately
log.Warn("Dropping dishonest peer - consensus verification failed", "peer", reportingPeer, "reported", reportedPageCount, "consensus", consensusPageCount)
m.parentDropPeer(reportingPeer)
return false
}

// Peer is honest - cache result
log.Debug("[wm] Peer verification successful", "peer", reportingPeer, "pageCount", reportedPageCount)
m.cacheVerificationResult(hash, reportedPageCount)
return true
}
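For orientation, below is a minimal sketch of how a handler-side caller might wire the new hooks together when a peer announces a paged witness. Only BlockFetcher.GetWitnessManager and CheckWitnessPageCount come from this diff; the witnessPeerView interface, the checkAnnouncedWitness helper, the sample size of 3, and the import paths are assumptions made for illustration, not code from this PR.

package handlerexample

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

// witnessPeerView is a hypothetical slice of the handler's peer set, just large
// enough for this sketch; it is not part of the PR.
type witnessPeerView interface {
	// SamplePeerIDs picks a few peers that can serve witnesses (hypothetical helper).
	SamplePeerIDs(n int) []string
	// RequestWitnessPageCount asks one peer how many pages it sees for a witness (hypothetical helper).
	RequestWitnessPageCount(peer string, hash common.Hash) (uint64, error)
}

// checkAnnouncedWitness returns true if the announcing peer's page count is accepted
// (within threshold, matching the cache, or confirmed by peer consensus); on a mismatch
// the witness manager has already dropped the peer and false is returned.
func checkAnnouncedWitness(f *fetcher.BlockFetcher, ps witnessPeerView, hash common.Hash, peer string, pages uint64) bool {
	return f.GetWitnessManager().CheckWitnessPageCount(
		hash,
		pages, // page count reported by the announcing peer
		peer,  // identifier of that peer
		func() []string { return ps.SamplePeerIDs(3) },
		func(p string, h common.Hash) (uint64, error) { return ps.RequestWitnessPageCount(p, h) },
	)
}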