
Add node discover function (#1032)
[bytom/bytom-spv.git] / netsync / fetcher.go
package netsync

import (
	"errors"

	log "github.com/sirupsen/logrus"
	"gopkg.in/karalabe/cookiejar.v2/collections/prque"

	"github.com/bytom/p2p"
	core "github.com/bytom/protocol"
	"github.com/bytom/protocol/bc"
	"github.com/bytom/protocol/bc/types"
)

const (
	maxQueueDist = 1024 // Maximum allowed distance from the chain head to queue
)

var (
	errTerminated = errors.New("terminated")
)

// Fetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type Fetcher struct {
	chain *core.Chain
	sw    *p2p.Switch
	peers *peerSet

	// Various event channels
	newMinedBlock chan *blockPending
	quit          chan struct{}

	// Block cache
	queue  *prque.Prque              // Queue containing the import operations (block number sorted)
	queues map[string]int            // Per peer block counts to prevent memory exhaustion
	queued map[bc.Hash]*blockPending // Set of already queued blocks (to dedup imports)
}

// NewFetcher creates a block fetcher to retrieve newly mined blocks.
func NewFetcher(chain *core.Chain, sw *p2p.Switch, peers *peerSet) *Fetcher {
	return &Fetcher{
		chain:         chain,
		sw:            sw,
		peers:         peers,
		newMinedBlock: make(chan *blockPending),
		quit:          make(chan struct{}),
		queue:         prque.New(),
		queued:        make(map[bc.Hash]*blockPending),
	}
}
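
// Example usage (a minimal sketch, assuming chain, sw, peers, peerID and block are
// provided by the surrounding sync manager):
//
//	fetcher := NewFetcher(chain, sw, peers)
//	fetcher.Start()
//	defer fetcher.Stop()
//
//	// forward a mined block announced by peerID
//	if err := fetcher.Enqueue(peerID, block); err != nil {
//		log.WithField("err", err).Error("enqueue mined block")
//	}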

// Start boots up the block fetcher, accepting and processing newly mined block
// announcements until termination is requested.
func (f *Fetcher) Start() {
	go f.loop()
}

// Stop terminates the block fetcher, canceling all pending operations.
func (f *Fetcher) Stop() {
	close(f.quit)
}
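
// Note: closing the quit channel unblocks both the loop goroutine and any caller
// blocked in Enqueue, which then returns errTerminated.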

// Enqueue tries to fill gaps in the fetcher's future import queue.
func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
	op := &blockPending{
		peerID: peer,
		block:  block,
	}
	select {
	case f.newMinedBlock <- op:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// loop is the main fetcher loop: it drains importable blocks from the queue and
// processes newly mined block notifications until termination is requested.
func (f *Fetcher) loop() {
	for {
		// Import any queued blocks that could potentially fit
		height := f.chain.BestBlockHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*blockPending)
			// If the block is too far ahead of the chain head, re-queue it and retry later
			number := op.block.Height
			if number > height+1 {
				f.queue.Push(op, -float32(op.block.Height))
				break
			}
			// Otherwise if fresh and still unknown, try and import
			hash := op.block.Hash()
			block, _ := f.chain.GetBlockByHash(&hash)
			if block != nil {
				f.forgetBlock(hash)
				continue
			}
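			// Discard blocks that do not directly extend the current best block; such
			// out-of-order blocks are left to the regular block sync.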
			if op.block.PreviousBlockHash.String() != f.chain.BestBlockHash().String() {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.peerID, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case op := <-f.newMinedBlock:
			// A direct block insertion was requested, try and fill any pending gaps
			f.enqueue(op.peerID, op.block)
		}
	}
}

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// TODO: Ensure the peer isn't DOSing us
	// Discard any past or too distant blocks
	if dist := int64(block.Height) - int64(f.chain.BestBlockHeight()); dist < 0 || dist > maxQueueDist {
		log.Info("Discarded propagated block, too far away", " peer: ", peer, " number: ", block.Height, " distance: ", dist)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &blockPending{
			peerID: peer,
			block:  block,
		}
		f.queued[hash] = op
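		// prque pops the highest priority first, so pushing the negated height makes
		// the lowest-height block come out of the queue first.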
		f.queue.Push(op, -float32(block.Height))
		log.Info("Queued received mined block.", " peer:", peer, " number:", block.Height, " queued:", f.queue.Size())
	}
}

// insert imports the given block into the chain. If the import fails, the sending
// peer's ban score is raised; on success the block is broadcast to the connected
// peers.
func (f *Fetcher) insert(peerID string, block *types.Block) {
	log.Info("Importing propagated block", " from peer: ", peerID, " height: ", block.Height)
	// Run the actual import and log any issues
	if _, err := f.chain.ProcessBlock(block); err != nil {
		log.Info("Propagated block import failed", " from peer: ", peerID, " height: ", block.Height, " err: ", err)
		fPeer, ok := f.peers.Peer(peerID)
		if !ok {
			return
		}
		swPeer := fPeer.getPeer()
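		// Penalize the peer that sent the invalid block; once its ban score crosses the
		// switch's threshold it is banned and disconnected.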
		if ban := fPeer.addBanScore(20, 0, "block process error"); ban {
			f.sw.AddBannedPeer(swPeer)
			f.sw.StopPeerGracefully(swPeer)
		}
		return
	}
	// If import succeeded, broadcast the block
	log.Info("Successfully processed a block from the new mined blocks cache. block height: ", block.Height)
	peers, err := f.peers.BroadcastMinedBlock(block)
	if err != nil {
		log.Errorf("Broadcast mined block error. %v", err)
		return
	}
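	// Disconnect the peers that the broadcast reported as failed.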
	for _, fPeer := range peers {
		if fPeer == nil {
			continue
		}
		swPeer := fPeer.getPeer()
		log.Info("Fetcher failed to broadcast block to peer. Stopping peer.")
		f.sw.StopPeerGracefully(swPeer)
	}
}

// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *Fetcher) forgetBlock(hash bc.Hash) {
	if insert := f.queued[hash]; insert != nil {
		delete(f.queued, hash)
	}
}