
perfect_block_proposer (#1935)
author Poseidon <shenao.78@163.com>
Wed, 19 May 2021 10:29:09 +0000 (18:29 +0800)
committer GitHub <noreply@github.com>
Wed, 19 May 2021 10:29:09 +0000 (18:29 +0800)
* perfect_block_proposer

* remove rubbish file

* solonet normal block

* fix ci

* remove test code

* add coinbase amount

* bug fix

* add fee to coinbase amount

* remove duplicate code

Co-authored-by: Paladz <yzhu101@uottawa.ca>
32 files changed:
account/accounts.go
account/accounts_test.go
api/accounts.go
api/api.go
api/nodeinfo.go
api/proposer.go [new file with mode: 0644]
asset/asset_test.go
cmd/bytomd/commands/init.go
cmd/bytomd/commands/run_node.go
config/config.go
consensus/general.go
database/store.go
node/node.go
p2p/discover/dht/net.go
p2p/switch.go
proposal/blockproposer/blockproposer.go
proposal/proposal.go
protocol/apply_block.go
protocol/auth_verification.go
protocol/bc/types/sup_link.go
protocol/block.go
protocol/casper.go
protocol/protocol.go
protocol/state/checkpoint.go
protocol/store.go
protocol/tree_node.go
protocol/txpool_test.go
protocol/validation/block.go
protocol/validation/block_test.go
test/integration/block_integration_util.go
test/util.go
wallet/wallet_test.go

diff --git a/account/accounts.go b/account/accounts.go
index 6ec96a9..72d43d9 100644
@@ -541,6 +541,36 @@ func (m *Manager) GetLocalCtrlProgramByAddress(address string) (*CtrlProgram, er
        return cp, json.Unmarshal(rawProgram, cp)
 }
 
+// GetMiningAddress will return the mining address
+func (m *Manager) GetMiningAddress() (string, error) {
+       cp, err := m.GetCoinbaseCtrlProgram()
+       if err != nil {
+               return "", err
+       }
+
+       return cp.Address, nil
+}
+
+// SetMiningAddress will set the mining address
+func (m *Manager) SetMiningAddress(miningAddress string) (string, error) {
+       program, err := m.getProgramByAddress(miningAddress)
+       if err != nil {
+               return "", err
+       }
+
+       cp := &CtrlProgram{
+               Address:        miningAddress,
+               ControlProgram: program,
+       }
+       rawCP, err := json.Marshal(cp)
+       if err != nil {
+               return "", err
+       }
+
+       m.db.Set(miningAddressKey, rawCP)
+       return m.GetMiningAddress()
+}
+
 // IsLocalControlProgram check is the input control program belong to local
 func (m *Manager) IsLocalControlProgram(prog []byte) bool {
        var hash common.Hash
diff --git a/account/accounts_test.go b/account/accounts_test.go
index 95332b2..3fd37db 100644
@@ -216,7 +216,7 @@ func mockAccountManager(t *testing.T) *Manager {
 
        store := database.NewStore(testDB)
        txPool := protocol.NewTxPool(store, dispatcher)
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        if err != nil {
                t.Fatal(err)
        }
diff --git a/api/accounts.go b/api/accounts.go
index 9aa4a36..df903df 100644
@@ -177,3 +177,30 @@ func (a *API) listAddresses(ctx context.Context, ins struct {
        start, end := getPageRange(len(addresses), ins.From, ins.Count)
        return NewSuccessResponse(addresses[start:end])
 }
+
+type minigAddressResp struct {
+       MiningAddress string `json:"mining_address"`
+}
+
+func (a *API) getMiningAddress(ctx context.Context) Response {
+       miningAddress, err := a.wallet.AccountMgr.GetMiningAddress()
+       if err != nil {
+               return NewErrorResponse(err)
+       }
+       return NewSuccessResponse(minigAddressResp{
+               MiningAddress: miningAddress,
+       })
+}
+
+// POST /set-mining-address
+func (a *API) setMiningAddress(ctx context.Context, in struct {
+       MiningAddress string `json:"mining_address"`
+}) Response {
+       miningAddress, err := a.wallet.AccountMgr.SetMiningAddress(in.MiningAddress)
+       if err != nil {
+               return NewErrorResponse(err)
+       }
+       return NewSuccessResponse(minigAddressResp{
+               MiningAddress: miningAddress,
+       })
+}
diff --git a/api/api.go b/api/api.go
index ab4dc87..47462bb 100644
@@ -25,6 +25,7 @@ import (
        "github.com/bytom/bytom/net/websocket"
        "github.com/bytom/bytom/netsync/peers"
        "github.com/bytom/bytom/p2p"
+       "github.com/bytom/bytom/proposal/blockproposer"
        "github.com/bytom/bytom/protocol"
        "github.com/bytom/bytom/wallet"
 )
@@ -111,6 +112,7 @@ type API struct {
        chain           *protocol.Chain
        server          *http.Server
        handler         http.Handler
+       blockProposer   *blockproposer.BlockProposer
        txFeedTracker   *txfeed.Tracker
        notificationMgr *websocket.WSNotificationManager
        eventDispatcher *event.Dispatcher
@@ -178,12 +180,13 @@ type NetSync interface {
 }
 
 // NewAPI create and initialize the API
-func NewAPI(sync NetSync, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, dispatcher *event.Dispatcher, notificationMgr *websocket.WSNotificationManager) *API {
+func NewAPI(sync NetSync, wallet *wallet.Wallet, blockProposer *blockproposer.BlockProposer, txfeeds *txfeed.Tracker, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, dispatcher *event.Dispatcher, notificationMgr *websocket.WSNotificationManager) *API {
        api := &API{
                sync:            sync,
                wallet:          wallet,
                chain:           chain,
                accessTokens:    token,
+               blockProposer:   blockProposer,
                txFeedTracker:   txfeeds,
                eventDispatcher: dispatcher,
                notificationMgr: notificationMgr,
@@ -214,6 +217,9 @@ func (a *API) buildHandler() {
                m.Handle("/validate-address", jsonHandler(a.validateAddress))
                m.Handle("/list-pubkeys", jsonHandler(a.listPubKeys))
 
+               m.Handle("/get-mining-address", jsonHandler(a.getMiningAddress))
+               m.Handle("/set-mining-address", jsonHandler(a.setMiningAddress))
+
                m.Handle("/create-asset", jsonHandler(a.createAsset))
                m.Handle("/update-asset-alias", jsonHandler(a.updateAssetAlias))
                m.Handle("/get-asset", jsonHandler(a.getAsset))
@@ -283,6 +289,9 @@ func (a *API) buildHandler() {
        m.Handle("/get-block-header", jsonHandler(a.getBlockHeader))
        m.Handle("/get-block-count", jsonHandler(a.getBlockCount))
 
+       m.Handle("/is-mining", jsonHandler(a.isMining))
+       m.Handle("/set-mining", jsonHandler(a.setMining))
+
        m.Handle("/verify-message", jsonHandler(a.verifyMessage))
        m.Handle("/compile", jsonHandler(a.compileEquity))
 
diff --git a/api/nodeinfo.go b/api/nodeinfo.go
index c22f034..561a836 100644
@@ -20,6 +20,7 @@ type VersionInfo struct {
 type NetInfo struct {
        Listening    bool         `json:"listening"`
        Syncing      bool         `json:"syncing"`
+       Mining       bool         `json:"mining"`
        PeerCount    int          `json:"peer_count"`
        CurrentBlock uint64       `json:"current_block"`
        HighestBlock uint64       `json:"highest_block"`
@@ -32,6 +33,7 @@ func (a *API) GetNodeInfo() *NetInfo {
        info := &NetInfo{
                Listening:    a.sync.IsListening(),
                Syncing:      !a.sync.IsCaughtUp(),
+               Mining:       a.blockProposer.IsProposing(),
                PeerCount:    a.sync.PeerCount(),
                CurrentBlock: a.chain.BestBlockHeight(),
                NetWorkID:    a.sync.GetNetwork(),
@@ -90,6 +92,17 @@ func (a *API) getNetInfo() Response {
        return NewSuccessResponse(a.GetNodeInfo())
 }
 
+// isMining returns whether the node is currently mining
+func (a *API) isMining() Response {
+       IsMining := map[string]bool{"is_mining": a.IsMining()}
+       return NewSuccessResponse(IsMining)
+}
+
+// IsMining returns the mining status
+func (a *API) IsMining() bool {
+       return a.blockProposer.IsProposing()
+}
+
 // return the peers of current node
 func (a *API) listPeers() Response {
        return NewSuccessResponse(a.sync.GetPeerInfos())
diff --git a/api/proposer.go b/api/proposer.go
new file mode 100644
index 0000000..7bd8607
--- /dev/null
@@ -0,0 +1,33 @@
+package api
+
+import (
+       "errors"
+)
+
+func (a *API) setMining(in struct {
+       IsMining bool `json:"is_mining"`
+}) Response {
+       if in.IsMining {
+               if _, err := a.wallet.AccountMgr.GetMiningAddress(); err != nil {
+                       return NewErrorResponse(errors.New("Mining address does not exist"))
+               }
+               return a.startMining()
+       }
+       return a.stopMining()
+}
+
+func (a *API) startMining() Response {
+       a.blockProposer.Start()
+       if !a.IsMining() {
+               return NewErrorResponse(errors.New("Failed to start mining"))
+       }
+       return NewSuccessResponse("")
+}
+
+func (a *API) stopMining() Response {
+       a.blockProposer.Stop()
+       if a.IsMining() {
+               return NewErrorResponse(errors.New("Failed to stop mining"))
+       }
+       return NewSuccessResponse("")
+}
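
Taken together, the handlers above expose four JSON endpoints: /get-mining-address, /set-mining-address, /is-mining and /set-mining. Below is a minimal client-side sketch in Go, assuming a local node whose API listens on 127.0.0.1:9888 with access-token authentication disabled and using a placeholder address; none of these assumptions come from the diff itself.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// call posts a JSON body to a bytomd API path and prints the decoded response.
func call(path string, body interface{}) error {
	buf, err := json.Marshal(body)
	if err != nil {
		return err
	}

	// 127.0.0.1:9888 is an assumed local API address, not taken from this diff.
	resp, err := http.Post("http://127.0.0.1:9888"+path, "application/json", bytes.NewReader(buf))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return err
	}
	fmt.Println(path, "->", result)
	return nil
}

func main() {
	// The bech32 address is a placeholder; use one owned by the wallet.
	requests := []struct {
		path string
		body interface{}
	}{
		{"/set-mining-address", map[string]string{"mining_address": "sm1q..."}},
		{"/set-mining", map[string]bool{"is_mining": true}},
		{"/is-mining", struct{}{}},
	}

	for _, r := range requests {
		if err := call(r.path, r.body); err != nil {
			fmt.Println(r.path, "failed:", err)
		}
	}
}

Note that setMining refuses to start proposing until a mining address has been configured, which is why the address is set first in this sketch.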
diff --git a/asset/asset_test.go b/asset/asset_test.go
index 9f35d8b..2e831ce 100644
@@ -162,7 +162,7 @@ func mockChain(testDB dbm.DB) (*protocol.Chain, error) {
        store := database.NewStore(testDB)
        dispatcher := event.NewDispatcher()
        txPool := protocol.NewTxPool(store, dispatcher)
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        if err != nil {
                return nil, err
        }
diff --git a/cmd/bytomd/commands/init.go b/cmd/bytomd/commands/init.go
index ba5a538..410c65b 100644
@@ -1,6 +1,8 @@
 package commands
 
 import (
+       "encoding/hex"
+       "io/ioutil"
        "os"
        "path"
 
@@ -8,6 +10,7 @@ import (
        "github.com/spf13/cobra"
 
        cfg "github.com/bytom/bytom/config"
+       "github.com/bytom/bytom/crypto/ed25519/chainkd"
 )
 
 var initFilesCmd = &cobra.Command{
@@ -36,5 +39,20 @@ func initFiles(cmd *cobra.Command, args []string) {
                cfg.EnsureRoot(config.RootDir, "solonet")
        }
 
+       //generate the node private key
+       keyFilePath := path.Join(config.RootDir, config.PrivateKeyFile)
+       if _, err := os.Stat(keyFilePath); os.IsNotExist(err) {
+               xprv, err := chainkd.NewXPrv(nil)
+               if err != nil {
+                       log.WithFields(log.Fields{"module": logModule, "err": err}).Fatal("fail on generate private key")
+               }
+
+               if err := ioutil.WriteFile(keyFilePath, []byte(hex.EncodeToString(xprv[:])), 0600); err != nil {
+                       log.WithFields(log.Fields{"module": logModule, "err": err}).Fatal("fail on save private key")
+               }
+
+               log.WithFields(log.Fields{"pubkey": xprv.XPub()}).Info("success generate private")
+       }
+
        log.WithFields(log.Fields{"module": logModule, "config": configFilePath}).Info("Initialized bytom")
 }
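
For completeness, the key written above is the node's extended private key, hex-encoded into config.PrivateKeyFile with mode 0600. Below is a sketch of reading it back, assuming chainkd.XPrv's 64-byte array layout (implied by xprv[:] above) and using placeholder paths; the actual loader in the config package is not part of this excerpt.

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"path"

	"github.com/bytom/bytom/crypto/ed25519/chainkd"
)

// loadNodeKey reverses what initFiles writes: a hex-encoded XPrv in a single file.
func loadNodeKey(rootDir, privateKeyFile string) (chainkd.XPrv, error) {
	var xprv chainkd.XPrv

	raw, err := ioutil.ReadFile(path.Join(rootDir, privateKeyFile))
	if err != nil {
		return xprv, err
	}

	if _, err := hex.Decode(xprv[:], bytes.TrimSpace(raw)); err != nil {
		return xprv, err
	}
	return xprv, nil
}

func main() {
	// Both arguments are placeholders; the real values come from the node config.
	xprv, err := loadNodeKey("/path/to/.bytomd", "node_key.txt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("node pubkey: %x\n", xprv.XPub())
}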
diff --git a/cmd/bytomd/commands/run_node.go b/cmd/bytomd/commands/run_node.go
index b510cd1..a680815 100644
@@ -19,7 +19,7 @@ var runNodeCmd = &cobra.Command{
 
 func init() {
        runNodeCmd.Flags().String("prof_laddr", config.ProfListenAddress, "Use http to profile bytomd programs")
-
+       runNodeCmd.Flags().Bool("mining", config.Mining, "Enable mining")
        runNodeCmd.Flags().Bool("simd.enable", config.Simd.Enable, "Enable SIMD mechan for tensority")
 
        runNodeCmd.Flags().Bool("auth.disable", config.Auth.Disable, "Disable rpc access authenticate")
diff --git a/config/config.go b/config/config.go
index de68907..ebc59f0 100644
@@ -102,6 +102,8 @@ type BaseConfig struct {
        // TCP or UNIX socket address for the profiling server to listen on
        ProfListenAddress string `mapstructure:"prof_laddr"`
 
+       Mining bool `mapstructure:"mining"`
+
        // Database backend: leveldb | memdb
        DBBackend string `mapstructure:"db_backend"`
 
@@ -128,6 +130,7 @@ func DefaultBaseConfig() BaseConfig {
        return BaseConfig{
                Moniker:           "anonymous",
                ProfListenAddress: "",
+               Mining:            false,
                DBBackend:         "leveldb",
                DBPath:            "data",
                KeysPath:          "keystore",
diff --git a/consensus/general.go b/consensus/general.go
index 7e52890..a7be813 100644
@@ -7,7 +7,7 @@ import (
        "github.com/bytom/bytom/protocol/bc"
 )
 
-//consensus variables
+// consensus variables
 const (
        // Max gas that one block contains
        MaxBlockGas      = uint64(10000000)
@@ -17,7 +17,7 @@ const (
        DefaultGasCredit = int64(30000)
        NumOfValidators  = int(10)
 
-       //config parameter for coinbase reward
+       // config parameter for coinbase reward
        CoinbasePendingBlockNumber = uint64(100)
        subsidyReductionInterval   = uint64(840000)
        baseSubsidy                = uint64(41250000000)
@@ -40,6 +40,9 @@ const (
 type CasperConfig struct {
        // BlockTimeInterval, milliseconds, the block time interval for producing a block
        BlockTimeInterval uint64
+
+       // MaxTimeOffsetMs represents the maximum number of milliseconds a block timestamp is allowed to be ahead of the current time
+       MaxTimeOffsetMs uint64
 }
 
 // BTMAssetID is BTM's asset id, the soul asset of Bytom
@@ -123,6 +126,7 @@ var MainNetParams = Params{
        Bech32HRPSegwit: "bm",
        DefaultPort:     "46657",
        DNSSeeds:        []string{"www.mainnetseed.bytom.io"},
+       CasperConfig:    CasperConfig{BlockTimeInterval: 6000, MaxTimeOffsetMs: 24000},
        Checkpoints: []Checkpoint{
                {10000, bc.NewHash([32]byte{0x93, 0xe1, 0xeb, 0x78, 0x21, 0xd2, 0xb4, 0xad, 0x0f, 0x5b, 0x1c, 0xea, 0x82, 0xe8, 0x43, 0xad, 0x8c, 0x09, 0x9a, 0xb6, 0x5d, 0x8f, 0x70, 0xc5, 0x84, 0xca, 0xa2, 0xdd, 0xf1, 0x74, 0x65, 0x2c})},
                {20000, bc.NewHash([32]byte{0x7d, 0x38, 0x61, 0xf3, 0x2c, 0xc0, 0x03, 0x81, 0xbb, 0xcd, 0x9a, 0x37, 0x6f, 0x10, 0x5d, 0xfe, 0x6f, 0xfe, 0x2d, 0xa5, 0xea, 0x88, 0xa5, 0xe3, 0x42, 0xed, 0xa1, 0x17, 0x9b, 0xa8, 0x0b, 0x7c})},
@@ -161,6 +165,7 @@ var TestNetParams = Params{
        Bech32HRPSegwit: "tm",
        DefaultPort:     "46656",
        DNSSeeds:        []string{"www.testnetseed.bytom.io"},
+       CasperConfig:    CasperConfig{BlockTimeInterval: 6000, MaxTimeOffsetMs: 24000},
        Checkpoints: []Checkpoint{
                {10303, bc.NewHash([32]byte{0x3e, 0x94, 0x5d, 0x35, 0x70, 0x30, 0xd4, 0x3b, 0x3d, 0xe3, 0xdd, 0x80, 0x67, 0x29, 0x9a, 0x5e, 0x09, 0xf9, 0xfb, 0x2b, 0xad, 0x5f, 0x92, 0xc8, 0x69, 0xd1, 0x42, 0x39, 0x74, 0x9a, 0xd1, 0x1c})},
                {40000, bc.NewHash([32]byte{0x6b, 0x13, 0x9a, 0x5b, 0x76, 0x77, 0x9b, 0xd4, 0x1c, 0xec, 0x53, 0x68, 0x44, 0xbf, 0xf4, 0x48, 0x94, 0x3d, 0x16, 0xe3, 0x9b, 0x2e, 0xe8, 0xa1, 0x0f, 0xa0, 0xbc, 0x7d, 0x2b, 0x17, 0x55, 0xfc})},
@@ -182,5 +187,6 @@ var TestNetParams = Params{
 var SoloNetParams = Params{
        Name:            "solo",
        Bech32HRPSegwit: "sm",
+       CasperConfig:    CasperConfig{BlockTimeInterval: 6000, MaxTimeOffsetMs: 24000},
        Checkpoints:     []Checkpoint{},
 }
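
All three networks therefore target one block every 6000 ms and tolerate block timestamps at most 24000 ms (24 s) ahead of the local clock. The following is only an illustrative sketch of the kind of check MaxTimeOffsetMs enables; the actual rule (presumably in the validation package listed among the changed files) is not shown in this excerpt.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Mirrors the CasperConfig values added above (both in milliseconds).
const (
	blockTimeInterval = uint64(6000)
	maxTimeOffsetMs   = uint64(24000)
)

// checkBlockTime is an illustrative timestamp rule: a block must be at least one
// interval after its parent and no more than maxTimeOffsetMs ahead of local time.
func checkBlockTime(blockTimeMs, parentTimeMs uint64) error {
	if blockTimeMs < parentTimeMs+blockTimeInterval {
		return errors.New("block time is too close to its parent")
	}

	nowMs := uint64(time.Now().UnixNano() / 1e6)
	if blockTimeMs > nowMs+maxTimeOffsetMs {
		return errors.New("block time is too far in the future")
	}
	return nil
}

func main() {
	nowMs := uint64(time.Now().UnixNano() / 1e6)
	fmt.Println(checkBlockTime(nowMs+6000, nowMs))        // nil: the next 6 s slot
	fmt.Println(checkBlockTime(nowMs+60000, nowMs+54000)) // error: more than 24 s ahead of the local clock
}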
diff --git a/database/store.go b/database/store.go
index a968373..b840b4a 100644
@@ -2,12 +2,14 @@ package database
 
 import (
        "encoding/binary"
+       "encoding/hex"
        "encoding/json"
        "time"
 
        log "github.com/sirupsen/logrus"
        "github.com/tendermint/tmlibs/common"
 
+       "github.com/bytom/bytom/consensus"
        dbm "github.com/bytom/bytom/database/leveldb"
        "github.com/bytom/bytom/database/storage"
        "github.com/bytom/bytom/errors"
@@ -275,6 +277,7 @@ func (s *Store) GetCheckpoint(hash *bc.Hash) (*state.Checkpoint, error) {
                return nil, err
        }
 
+       setSupLinkToCheckpoint(checkpoint, header.SupLinks)
        return checkpoint, nil
 }
 
@@ -282,7 +285,7 @@ func (s *Store) GetCheckpoint(hash *bc.Hash) (*state.Checkpoint, error) {
 func (s *Store) GetCheckpointsByHeight(height uint64) ([]*state.Checkpoint, error) {
        iter := s.db.IteratorPrefix(calcCheckpointKey(height, nil))
        defer iter.Release()
-       return loadCheckpointsFromIter(iter)
+       return s.loadCheckpointsFromIter(iter)
 }
 
 // CheckpointsFromNode return all checkpoints from specified block height and hash
@@ -290,13 +293,13 @@ func (s *Store) CheckpointsFromNode(height uint64, hash *bc.Hash) ([]*state.Chec
        startKey := calcCheckpointKey(height, hash)
        iter := s.db.IteratorPrefixWithStart(CheckpointPrefix, startKey, false)
 
-       finalizedCheckpoint := &state.Checkpoint{}
-       if err := json.Unmarshal(iter.Value(), finalizedCheckpoint); err != nil {
+       firstCheckpoint := &state.Checkpoint{}
+       if err := json.Unmarshal(iter.Value(), firstCheckpoint); err != nil {
                return nil, err
        }
 
-       checkpoints := []*state.Checkpoint{finalizedCheckpoint}
-       subs, err := loadCheckpointsFromIter(iter)
+       checkpoints := []*state.Checkpoint{firstCheckpoint}
+       subs, err := s.loadCheckpointsFromIter(iter)
        if err != nil {
                return nil, err
        }
@@ -305,7 +308,7 @@ func (s *Store) CheckpointsFromNode(height uint64, hash *bc.Hash) ([]*state.Chec
        return checkpoints, nil
 }
 
-func loadCheckpointsFromIter(iter dbm.Iterator) ([]*state.Checkpoint, error) {
+func (s *Store) loadCheckpointsFromIter(iter dbm.Iterator) ([]*state.Checkpoint, error) {
        var checkpoints []*state.Checkpoint
        defer iter.Release()
        for iter.Next() {
@@ -314,6 +317,12 @@ func loadCheckpointsFromIter(iter dbm.Iterator) ([]*state.Checkpoint, error) {
                        return nil, err
                }
 
+               header, err := s.GetBlockHeader(&checkpoint.Hash)
+               if err != nil {
+                       return nil, err
+               }
+
+               setSupLinkToCheckpoint(checkpoint, header.SupLinks)
                checkpoints = append(checkpoints, checkpoint)
        }
        return checkpoints, nil
@@ -328,8 +337,32 @@ func (s *Store) SaveCheckpoints(checkpoints ...*state.Checkpoint) error {
                        return err
                }
 
+               if checkpoint.Height % state.BlocksOfEpoch != 1 {
+                       header, err := s.GetBlockHeader(&checkpoint.Hash)
+                       if err != nil {
+                               return err
+                       }
+
+                       batch.Delete(calcCheckpointKey(header.Height-1, &header.PreviousBlockHash))
+               }
+
                batch.Set(calcCheckpointKey(checkpoint.Height, &checkpoint.Hash), data)
        }
        batch.Write()
        return nil
 }
+
+func setSupLinkToCheckpoint(c *state.Checkpoint, supLinks types.SupLinks) {
+       for _, supLink := range supLinks {
+               var signatures [consensus.NumOfValidators]string
+               for i, signature := range supLink.Signatures {
+                       signatures[i] = hex.EncodeToString(signature)
+               }
+
+               c.SupLinks = append(c.SupLinks, &state.SupLink{
+                       SourceHeight: supLink.SourceHeight,
+                       SourceHash:   supLink.SourceHash,
+                       Signatures:   signatures,
+               })
+       }
+}
diff --git a/node/node.go b/node/node.go
index 7adb99a..771b4ec 100644
@@ -11,6 +11,7 @@ import (
        log "github.com/sirupsen/logrus"
        cmn "github.com/tendermint/tmlibs/common"
        browser "github.com/toqueteos/webbrowser"
+       "github.com/bytom/bytom/proposal/blockproposer"
        "github.com/prometheus/prometheus/util/flock"
 
        "github.com/bytom/bytom/accesstoken"
@@ -51,6 +52,8 @@ type Node struct {
        notificationMgr *websocket.WSNotificationManager
        api             *api.API
        chain           *protocol.Chain
+       blockProposer   *blockproposer.BlockProposer
+       miningEnable    bool
        txfeed          *txfeed.Tracker
 }
 
@@ -81,7 +84,7 @@ func NewNode(config *cfg.Config) *Node {
        dispatcher := event.NewDispatcher()
        txPool := protocol.NewTxPool(store, dispatcher)
 
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        if err != nil {
                cmn.Exit(cmn.Fmt("Failed to create chain structure: %v", err))
        }
@@ -147,13 +150,14 @@ func NewNode(config *cfg.Config) *Node {
                accessTokens:    accessTokens,
                wallet:          wallet,
                chain:           chain,
+               miningEnable:    config.Mining,
                txfeed:          txFeed,
 
                notificationMgr: notificationMgr,
        }
 
        node.BaseService = *cmn.NewBaseService(nil, "Node", node)
-
+       node.blockProposer = blockproposer.NewBlockProposer(chain, accounts, dispatcher)
        return node
 }
 
@@ -189,7 +193,7 @@ func launchWebBrowser(port string) {
 }
 
 func (n *Node) initAndstartAPIServer() {
-       n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.chain, n.config, n.accessTokens, n.eventDispatcher, n.notificationMgr)
+       n.api = api.NewAPI(n.syncManager, n.wallet, n.blockProposer, n.txfeed, n.chain, n.config, n.accessTokens, n.eventDispatcher, n.notificationMgr)
 
        listenAddr := env.String("LISTEN", n.config.ApiAddress)
        env.Parse()
@@ -197,6 +201,14 @@ func (n *Node) initAndstartAPIServer() {
 }
 
 func (n *Node) OnStart() error {
+       if n.miningEnable {
+               if _, err := n.wallet.AccountMgr.GetMiningAddress(); err != nil {
+                       n.miningEnable = false
+                       log.Error(err)
+               } else {
+                       n.blockProposer.Start()
+               }
+       }
        if !n.config.VaultMode {
                if err := n.syncManager.Start(); err != nil {
                        return err
@@ -223,6 +235,9 @@ func (n *Node) OnStop() {
        n.notificationMgr.Shutdown()
        n.notificationMgr.WaitForShutdown()
        n.BaseService.OnStop()
+       if n.miningEnable {
+               n.blockProposer.Stop()
+       }
        if !n.config.VaultMode {
                n.syncManager.Stop()
        }
diff --git a/p2p/discover/dht/net.go b/p2p/discover/dht/net.go
index 4dc51a5..a2242ff 100644
@@ -3,6 +3,7 @@ package dht
 import (
        "bytes"
        "crypto"
+       "crypto/ed25519"
        "encoding/hex"
        "errors"
        "fmt"
@@ -117,7 +118,7 @@ type timeoutEvent struct {
 
 func newNetwork(conn transport, ourPubkey crypto.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
        var ourID NodeID
-       copy(ourID[:], ourPubkey.([]byte)[:nodeIDBits])
+       copy(ourID[:], ourPubkey.(ed25519.PublicKey)[:nodeIDBits])
 
        var db *nodeDB
        if dbPath != "<no database>" {
diff --git a/p2p/switch.go b/p2p/switch.go
index 9b4ff55..7c1fc62 100644
@@ -1,7 +1,6 @@
 package p2p
 
 import (
-       "crypto/ed25519"
        "fmt"
        "net"
        "sync"
@@ -90,7 +89,7 @@ func NewSwitch(config *cfg.Config) (*Switch, error) {
        if !config.VaultMode {
                // Create listener
                l, listenAddr = GetListener(config.P2P)
-               discv, err = dht.NewDiscover(config, ed25519.PrivateKey(bytes), l.ExternalAddress().Port)
+               discv, err = dht.NewDiscover(config, bytes, l.ExternalAddress().Port)
                if err != nil {
                        return nil, err
                }
diff --git a/proposal/blockproposer/blockproposer.go b/proposal/blockproposer/blockproposer.go
index 250085d..04d3097 100644
@@ -54,6 +54,7 @@ func (b *BlockProposer) generateBlocks() {
                }
 
                bestBlockHeader := b.chain.BestBlockHeader()
+               bestBlockHash := bestBlockHeader.Hash()
 
                now := uint64(time.Now().UnixNano() / 1e6)
                base := now
@@ -66,16 +67,19 @@ func (b *BlockProposer) generateBlocks() {
                        nextBlockTime += consensus.ActiveNetParams.BlockTimeInterval
                }
 
-               //TODO: get proposer by block hash and timestamp
-               var proposer string
+               validator, err := b.chain.GetValidator(&bestBlockHash, nextBlockTime)
+               if err != nil {
+                       log.WithFields(log.Fields{"module": logModule, "error": err, "pubKey": xpubStr}).Error("fail on check is next blocker")
+                       continue
+               }
 
-               if xpubStr != proposer {
+               if xpubStr != validator.PubKey {
                        continue
                }
 
                warnDuration := time.Duration(consensus.ActiveNetParams.BlockTimeInterval*warnTimeNum/warnTimeDenom) * time.Millisecond
                criticalDuration := time.Duration(consensus.ActiveNetParams.BlockTimeInterval*criticalTimeNum/criticalTimeDenom) * time.Millisecond
-               block, err := proposal.NewBlockTemplate(b.chain, b.accountManager, nextBlockTime, warnDuration, criticalDuration)
+               block, err := proposal.NewBlockTemplate(b.chain, validator, b.accountManager, nextBlockTime, warnDuration, criticalDuration)
                if err != nil {
                        log.WithFields(log.Fields{"module": logModule, "error": err}).Error("failed on create NewBlockTemplate")
                        continue
diff --git a/proposal/proposal.go b/proposal/proposal.go
index 4374cad..9d89345 100644
@@ -30,13 +30,14 @@ const (
 )
 
 // NewBlockTemplate returns a new block template that is ready to be solved
-func NewBlockTemplate(chain *protocol.Chain, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) (*types.Block, error) {
-       builder := newBlockBuilder(chain, accountManager, timestamp, warnDuration, criticalDuration)
+func NewBlockTemplate(chain *protocol.Chain, validator *state.Validator, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) (*types.Block, error) {
+       builder := newBlockBuilder(chain, validator, accountManager, timestamp, warnDuration, criticalDuration)
        return builder.build()
 }
 
 type blockBuilder struct {
        chain          *protocol.Chain
+       validator      *state.Validator
        accountManager *account.Manager
 
        block    *types.Block
@@ -48,7 +49,7 @@ type blockBuilder struct {
        gasLeft           int64
 }
 
-func newBlockBuilder(chain *protocol.Chain, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) *blockBuilder {
+func newBlockBuilder(chain *protocol.Chain, validator *state.Validator, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) *blockBuilder {
        preBlockHeader := chain.BestBlockHeader()
        block := &types.Block{
                BlockHeader: types.BlockHeader{
@@ -62,6 +63,7 @@ func newBlockBuilder(chain *protocol.Chain, accountManager *account.Manager, tim
 
        builder := &blockBuilder{
                chain:             chain,
+               validator:         validator,
                accountManager:    accountManager,
                block:             block,
                utxoView:          state.NewUtxoViewpoint(),
@@ -74,11 +76,13 @@ func newBlockBuilder(chain *protocol.Chain, accountManager *account.Manager, tim
 }
 
 func (b *blockBuilder) build() (*types.Block, error) {
-       if err := b.applyCoinbaseTransaction(); err != nil {
+       b.block.Transactions = []*types.Tx{nil}
+       feeAmount, err := b.applyTransactionFromPool()
+       if err != nil {
                return nil, err
        }
 
-       if err := b.applyTransactionFromPool(); err != nil {
+       if err := b.applyCoinbaseTransaction(feeAmount); err != nil {
                return nil, err
        }
 
@@ -86,12 +90,13 @@ func (b *blockBuilder) build() (*types.Block, error) {
                return nil, err
        }
 
-       b.chain.SignBlockHeader(&b.block.BlockHeader)
+       blockHeader := &b.block.BlockHeader
+       b.chain.SignBlockHeader(blockHeader)
        return b.block, nil
 }
 
-func (b *blockBuilder) applyCoinbaseTransaction() error {
-       coinbaseTx, err := b.createCoinbaseTx()
+func (b *blockBuilder) applyCoinbaseTransaction(feeAmount uint64) error {
+       coinbaseTx, err := b.createCoinbaseTx(feeAmount)
        if err != nil {
                return errors.Wrap(err, "fail on create coinbase tx")
        }
@@ -101,21 +106,15 @@ func (b *blockBuilder) applyCoinbaseTransaction() error {
                return err
        }
 
-       b.block.Transactions = append(b.block.Transactions, coinbaseTx)
+       b.block.Transactions[0] = coinbaseTx
        b.gasLeft -= gasState.GasUsed
        return nil
 }
 
-func (b *blockBuilder) applyTransactionFromPool() error {
+func (b *blockBuilder) applyTransactionFromPool() (uint64, error) {
        txDescList := b.chain.GetTxPool().GetTransactions()
        sort.Sort(byTime(txDescList))
-
-       poolTxs := make([]*types.Tx, len(txDescList))
-       for i, txDesc := range txDescList {
-               poolTxs[i] = txDesc.Tx
-       }
-
-       return b.applyTransactions(poolTxs, timeoutWarn)
+       return b.applyTransactions(txDescList, timeoutWarn)
 }
 
 func (b *blockBuilder) calculateBlockCommitment() (err error) {
@@ -135,7 +134,7 @@ func (b *blockBuilder) calculateBlockCommitment() (err error) {
 // createCoinbaseTx returns a coinbase transaction paying an appropriate subsidy
 // based on the passed block height to the provided address.  When the address
 // is nil, the coinbase transaction will instead be redeemable by anyone.
-func (b *blockBuilder) createCoinbaseTx() (tx *types.Tx, err error) {
+func (b *blockBuilder) createCoinbaseTx(feeAmount uint64) (tx *types.Tx, err error) {
        arbitrary := append([]byte{0x00}, []byte(strconv.FormatUint(b.block.Height, 10))...)
        var script []byte
        if b.accountManager == nil {
@@ -157,7 +156,8 @@ func (b *blockBuilder) createCoinbaseTx() (tx *types.Tx, err error) {
                return nil, err
        }
 
-       if err = builder.AddOutput(types.NewOriginalTxOutput(*consensus.BTMAssetID, 0, script, [][]byte{})); err != nil {
+       coinbaseAmount := consensus.BlockSubsidy(b.block.Height)
+       if err = builder.AddOutput(types.NewOriginalTxOutput(*consensus.BTMAssetID, coinbaseAmount + feeAmount, script, [][]byte{})); err != nil {
                return nil, err
        }
        //TODO: calculate reward to proposer
@@ -180,10 +180,11 @@ func (b *blockBuilder) createCoinbaseTx() (tx *types.Tx, err error) {
        return tx, nil
 }
 
-func (b *blockBuilder) applyTransactions(txs []*types.Tx, timeoutStatus uint8) error {
+func (b *blockBuilder) applyTransactions(txs []*protocol.TxDesc, timeoutStatus uint8) (uint64, error) {
+       var feeAmount uint64
        batchTxs := []*types.Tx{}
        for i := 0; i < len(txs); i++ {
-               if batchTxs = append(batchTxs, txs[i]); len(batchTxs) < batchApplyNum && i != len(txs)-1 {
+               if batchTxs = append(batchTxs, txs[i].Tx); len(batchTxs) < batchApplyNum && i != len(txs)-1 {
                        continue
                }
 
@@ -200,11 +201,12 @@ func (b *blockBuilder) applyTransactions(txs []*types.Tx, timeoutStatus uint8) e
 
                b.gasLeft = gasLeft
                batchTxs = batchTxs[:0]
+               feeAmount += txs[i].Fee
                if b.getTimeoutStatus() >= timeoutStatus || len(b.block.Transactions) > softMaxTxNum {
                        break
                }
        }
-       return nil
+       return feeAmount, nil
 }
 
 type validateTxResult struct {
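
The combined effect of the changes above: applyTransactionFromPool now returns the accumulated fees, and the coinbase output carries consensus.BlockSubsidy(height) plus those fees instead of a zero amount. A small worked example follows, with made-up fee values and the baseSubsidy figure from consensus/general.go; how BlockSubsidy scales with height is not shown in this excerpt.

package main

import "fmt"

func main() {
	// baseSubsidy as declared in consensus/general.go above; the fee figures are made up.
	blockSubsidy := uint64(41250000000)

	var feeAmount uint64
	for _, fee := range []uint64{150000, 250000} { // fees collected while packing transactions
		feeAmount += fee
	}

	coinbaseOutput := blockSubsidy + feeAmount
	fmt.Println(coinbaseOutput) // 41250400000
}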
diff --git a/protocol/apply_block.go b/protocol/apply_block.go
index 4803d5a..bb13486 100644
@@ -47,7 +47,7 @@ func (c *Casper) ApplyBlock(block *types.Block) (*Verification, error) {
                c.newEpochCh <- block.Hash()
        }
 
-       return c.myVerification(target, validators)
+       return c.applyMyVerification(target, validators)
 }
 
 func (c *Casper) applyBlockToCheckpoint(block *types.Block) (*state.Checkpoint, error) {
@@ -60,12 +60,11 @@ func (c *Casper) applyBlockToCheckpoint(block *types.Block) (*state.Checkpoint,
        if mod := block.Height % state.BlocksOfEpoch; mod == 1 {
                parent := checkpoint
                checkpoint = &state.Checkpoint{
-                       ParentHash:     parent.Hash,
-                       Parent:         parent,
-                       StartTimestamp: block.Timestamp,
-                       Status:         state.Growing,
-                       Votes:          make(map[string]uint64),
-                       Guaranties:     make(map[string]uint64),
+                       ParentHash: parent.Hash,
+                       Parent:     parent,
+                       Status:     state.Growing,
+                       Votes:      make(map[string]uint64),
+                       Guaranties: make(map[string]uint64),
                }
                node.children = append(node.children, &treeNode{checkpoint: checkpoint})
        } else if mod == 0 {
@@ -74,7 +73,8 @@ func (c *Casper) applyBlockToCheckpoint(block *types.Block) (*state.Checkpoint,
 
        checkpoint.Height = block.Height
        checkpoint.Hash = block.Hash()
-       return checkpoint, nil
+       checkpoint.Timestamp = block.Timestamp
+       return checkpoint, c.store.SaveCheckpoints(checkpoint)
 }
 
 func (c *Casper) applyTransactions(target *state.Checkpoint, transactions []*types.Tx) error {
@@ -111,30 +111,59 @@ func (c *Casper) applyTransactions(target *state.Checkpoint, transactions []*typ
 }
 
 // applySupLinks copy the block's supLink to the checkpoint
-func (c *Casper) applySupLinks(target *state.Checkpoint, supLinks []*types.SupLink, validators []*state.Validator) error {
+func (c *Casper) applySupLinks(target *state.Checkpoint, supLinks []*types.SupLink, validators map[string]*state.Validator) error {
        if target.Height%state.BlocksOfEpoch != 0 {
                return nil
        }
 
        for _, supLink := range supLinks {
-               for _, verification := range supLinkToVerifications(supLink, validators, target.Hash, target.Height) {
-                       if err := c.verifyVerification(verification, true); err == nil {
-                               if err := c.addVerificationToCheckpoint(target, verification); err != nil {
-                                       return err
-                               }
+               var validVerifications []*Verification
+               for _, v := range supLinkToVerifications(supLink, validators, target.Hash, target.Height) {
+                       if validate(v) == nil && c.verifyVerification(v, validators[v.PubKey].Order, true) == nil {
+                               validVerifications = append(validVerifications, v)
                        }
                }
+               if err := c.addVerificationToCheckpoint(target, validators, validVerifications...); err != nil {
+                       return err
+               }
        }
        return nil
 }
 
-func (c *Casper) myVerification(target *state.Checkpoint, validators []*state.Validator) (*Verification, error) {
+func (c *Casper) applyMyVerification(target *state.Checkpoint, validators map[string]*state.Validator) (*Verification, error) {
+       if target.Height%state.BlocksOfEpoch != 0 {
+               return nil, nil
+       }
+
        pubKey := config.CommonConfig.PrivateKey().XPub().String()
-       if !isValidator(pubKey, validators) {
+       if _, ok := validators[pubKey]; !ok {
                return nil, nil
        }
 
+       validatorOrder := validators[pubKey].Order
+       v, err := c.myVerification(target, validatorOrder)
+       if err != nil {
+               return nil, err
+       }
+
+       if v == nil {
+               return nil, nil
+       }
+
+       if err := c.addVerificationToCheckpoint(target, validators, v); err != nil {
+               return nil, err
+       }
+
+       return v, c.saveVerificationToHeader(v, validatorOrder)
+}
+
+func (c *Casper) myVerification(target *state.Checkpoint, validatorOrder int) (*Verification, error) {
        source := c.lastJustifiedCheckpointOfBranch(target)
+       if target.ContainsVerification(source.Hash, validatorOrder) {
+               return nil, nil
+       }
+
+       pubKey := config.CommonConfig.PrivateKey().XPub().String()
        if source != nil {
                v := &Verification{
                        SourceHash:   source.Hash,
@@ -149,11 +178,11 @@ func (c *Casper) myVerification(target *state.Checkpoint, validators []*state.Va
                        return nil, err
                }
 
-               if err := c.verifyVerification(v, false); err != nil {
+               if err := c.verifyVerification(v, validatorOrder, false); err != nil {
                        return nil, nil
                }
 
-               return v, c.addVerificationToCheckpoint(target, v)
+               return v, nil
        }
        return nil, nil
 }
@@ -238,11 +267,17 @@ func (c *Casper) lastJustifiedCheckpointOfBranch(branch *state.Checkpoint) *stat
                case state.Justified:
                        return parent
                }
+               parent = parent.Parent
        }
        return nil
 }
 
-func supLinkToVerifications(supLink *types.SupLink, validators []*state.Validator, targetHash bc.Hash, targetHeight uint64) []*Verification {
+func supLinkToVerifications(supLink *types.SupLink, validators map[string]*state.Validator, targetHash bc.Hash, targetHeight uint64) []*Verification {
+       validatorList := make([]*state.Validator, len(validators))
+       for _, validator := range validators {
+               validatorList[validator.Order] = validator
+       }
+
        var result []*Verification
        for i, signature := range supLink.Signatures {
                result = append(result, &Verification{
@@ -251,7 +286,7 @@ func supLinkToVerifications(supLink *types.SupLink, validators []*state.Validato
                        SourceHeight: supLink.SourceHeight,
                        TargetHeight: targetHeight,
                        Signature:    hex.EncodeToString(signature),
-                       PubKey:       validators[i].PubKey,
+                       PubKey:       validatorList[i].PubKey,
                })
        }
        return result
diff --git a/protocol/auth_verification.go b/protocol/auth_verification.go
index 8696f0b..4682d93 100644
@@ -1,6 +1,7 @@
 package protocol
 
 import (
+       "encoding/hex"
        "fmt"
 
        log "github.com/sirupsen/logrus"
@@ -18,15 +19,25 @@ func (c *Casper) AuthVerification(v *Verification) error {
                return err
        }
 
+       target, err := c.store.GetCheckpoint(&v.TargetHash)
+       if err != nil {
+               c.verificationCache.Add(verificationCacheKey(v.TargetHash, v.PubKey), v)
+               return nil
+       }
+
        validators, err := c.Validators(&v.TargetHash)
        if err != nil {
                return err
        }
 
-       if !isValidator(v.PubKey, validators) {
+       if _, ok := validators[v.PubKey]; !ok {
                return errPubKeyIsNotValidator
        }
 
+       if target.ContainsVerification(v.SourceHash, validators[v.PubKey].Order) {
+               return nil
+       }
+
        c.mu.Lock()
        defer c.mu.Unlock()
 
@@ -37,42 +48,45 @@ func (c *Casper) AuthVerification(v *Verification) error {
                return nil
        }
 
-       return c.authVerification(v)
+       return c.authVerification(v, target, validators)
 }
 
-func (c *Casper) authVerification(v *Verification) error {
-       target, err := c.store.GetCheckpoint(&v.TargetHash)
-       if err != nil {
-               c.verificationCache.Add(verificationCacheKey(v.TargetHash, v.PubKey), v)
-               return nil
+func (c *Casper) authVerification(v *Verification, target *state.Checkpoint, validators map[string]*state.Validator) error {
+       validator := validators[v.PubKey]
+       if err := c.verifyVerification(v, validator.Order, true); err != nil {
+               return err
        }
 
-       if err := c.verifyVerification(v, true); err != nil {
+       if err := c.addVerificationToCheckpoint(target, validators, v); err != nil {
                return err
        }
 
-       return c.addVerificationToCheckpoint(target, v)
+       return c.saveVerificationToHeader(v, validator.Order)
 }
 
-func (c *Casper) addVerificationToCheckpoint(target *state.Checkpoint, v *Verification) error {
-       source, err := c.store.GetCheckpoint(&v.SourceHash)
-       if err != nil {
-               return err
-       }
+func (c *Casper) addVerificationToCheckpoint(target *state.Checkpoint, validators map[string]*state.Validator, verifications ...*Verification) error {
+       _, oldBestHash := c.bestChain()
+       var affectedCheckpoints []*state.Checkpoint
+       for _, v := range verifications {
+               source, err := c.store.GetCheckpoint(&v.SourceHash)
+               if err != nil {
+                       return err
+               }
 
-       supLink := target.AddVerification(v.SourceHash, v.SourceHeight, v.PubKey, v.Signature)
-       if target.Status != state.Unjustified || !supLink.IsMajority() || source.Status == state.Finalized {
-               return nil
-       }
+               supLink := target.AddVerification(v.SourceHash, v.SourceHeight, validators[v.PubKey].Order, v.Signature)
+               if target.Status != state.Unjustified || !supLink.IsMajority() || source.Status == state.Finalized {
+                       continue
+               }
 
-       if source.Status == state.Unjustified {
-               c.justifyingCheckpoints[source.Hash] = append(c.justifyingCheckpoints[source.Hash], target)
-               return nil
+               if source.Status == state.Unjustified {
+                       c.justifyingCheckpoints[source.Hash] = append(c.justifyingCheckpoints[source.Hash], target)
+                       continue
+               }
+
+               affectedCheckpoints = append(affectedCheckpoints, c.setJustified(source, target)...)
        }
 
-       _, oldBestHash := c.BestChain()
-       affectedCheckpoints := c.setJustified(source, target)
-       _, newBestHash := c.BestChain()
+       _, newBestHash := c.bestChain()
        if oldBestHash != newBestHash {
                c.rollbackNotifyCh <- nil
        }
@@ -80,28 +94,37 @@ func (c *Casper) addVerificationToCheckpoint(target *state.Checkpoint, v *Verifi
        return c.store.SaveCheckpoints(affectedCheckpoints...)
 }
 
+func (c *Casper) saveVerificationToHeader(v *Verification, validatorOrder int) error {
+       blockHeader, err := c.store.GetBlockHeader(&v.TargetHash)
+       if err != nil {
+               return err
+       }
+
+       signature, err := hex.DecodeString(v.Signature)
+       if err != nil {
+               return err
+       }
+
+       blockHeader.SupLinks.AddSupLink(v.SourceHeight, v.SourceHash, signature, validatorOrder)
+       return c.store.SaveBlockHeader(blockHeader)
+}
+
+// setJustified is called when the source is already justified and a supermajority link exists from source to target
 func (c *Casper) setJustified(source, target *state.Checkpoint) []*state.Checkpoint {
-       affectedCheckpoints := make(map[bc.Hash]*state.Checkpoint)
+       var affectedCheckpoint []*state.Checkpoint
        target.Status = state.Justified
-       affectedCheckpoints[target.Hash] = target
        // must direct child
        if target.Parent.Hash == source.Hash {
                c.setFinalized(source)
-               affectedCheckpoints[source.Hash] = source
        }
 
        for _, checkpoint := range c.justifyingCheckpoints[target.Hash] {
-               for _, c := range c.setJustified(target, checkpoint) {
-                       affectedCheckpoints[c.Hash] = c
-               }
+               affectedCheckpoint = append(affectedCheckpoint, c.setJustified(target, checkpoint)...)
        }
-       delete(c.justifyingCheckpoints, target.Hash)
 
-       var result []*state.Checkpoint
-       for _, c := range affectedCheckpoints {
-               result = append(result, c)
-       }
-       return result
+       delete(c.justifyingCheckpoints, target.Hash)
+       affectedCheckpoint = append(affectedCheckpoint, source, target)
+       return affectedCheckpoint
 }
 
 func (c *Casper) setFinalized(checkpoint *state.Checkpoint) {
@@ -129,8 +152,16 @@ func (c *Casper) authVerificationLoop() {
                                continue
                        }
 
+                       v := verification.(*Verification)
+                       target, err := c.store.GetCheckpoint(&v.TargetHash)
+                       if err != nil {
+                               log.WithField("err", err).Error("get target checkpoint")
+                               c.verificationCache.Remove(key)
+                               continue
+                       }
+
                        c.mu.Lock()
-                       if err := c.authVerification(verification.(*Verification)); err != nil {
+                       if err := c.authVerification(v, target, validators); err != nil {
                                log.WithField("err", err).Error("auth verification in cache")
                        }
                        c.mu.Unlock()
@@ -140,16 +171,16 @@ func (c *Casper) authVerificationLoop() {
        }
 }
 
-func (c *Casper) verifyVerification(v *Verification, trackEvilValidator bool) error {
-       if err := c.verifySameHeight(v, trackEvilValidator); err != nil {
+func (c *Casper) verifyVerification(v *Verification, validatorOrder int, trackEvilValidator bool) error {
+       if err := c.verifySameHeight(v, validatorOrder, trackEvilValidator); err != nil {
                return err
        }
 
-       return c.verifySpanHeight(v, trackEvilValidator)
+       return c.verifySpanHeight(v, validatorOrder, trackEvilValidator)
 }
 
 // a validator must not publish two distinct votes for the same target height
-func (c *Casper) verifySameHeight(v *Verification, trackEvilValidator bool) error {
+func (c *Casper) verifySameHeight(v *Verification, validatorOrder int, trackEvilValidator bool) error {
        checkpoints, err := c.store.GetCheckpointsByHeight(v.TargetHeight)
        if err != nil {
                return err
@@ -157,9 +188,9 @@ func (c *Casper) verifySameHeight(v *Verification, trackEvilValidator bool) erro
 
        for _, checkpoint := range checkpoints {
                for _, supLink := range checkpoint.SupLinks {
-                       if _, ok := supLink.Signatures[v.PubKey]; ok && checkpoint.Hash != v.TargetHash {
+                       if supLink.Signatures[validatorOrder] != "" && checkpoint.Hash != v.TargetHash {
                                if trackEvilValidator {
-                                       c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey)}
+                                       c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey, validatorOrder)}
                                }
                                return errSameHeightInVerification
                        }
@@ -169,18 +200,18 @@ func (c *Casper) verifySameHeight(v *Verification, trackEvilValidator bool) erro
 }
 
 // a validator must not vote within the span of its other votes.
-func (c *Casper) verifySpanHeight(v *Verification, trackEvilValidator bool) error {
+func (c *Casper) verifySpanHeight(v *Verification, validatorOrder int, trackEvilValidator bool) error {
        if c.tree.findOnlyOne(func(checkpoint *state.Checkpoint) bool {
                if checkpoint.Height == v.TargetHeight {
                        return false
                }
 
                for _, supLink := range checkpoint.SupLinks {
-                       if _, ok := supLink.Signatures[v.PubKey]; ok {
+                       if supLink.Signatures[validatorOrder] != "" {
                                if (checkpoint.Height < v.TargetHeight && supLink.SourceHeight > v.SourceHeight) ||
                                        (checkpoint.Height > v.TargetHeight && supLink.SourceHeight < v.SourceHeight) {
                                        if trackEvilValidator {
-                                               c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey)}
+                                               c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey, validatorOrder)}
                                        }
                                        return true
                                }
@@ -193,13 +224,13 @@ func (c *Casper) verifySpanHeight(v *Verification, trackEvilValidator bool) erro
        return nil
 }
 
-func makeVerification(supLink *state.SupLink, checkpoint *state.Checkpoint, pubKey string) *Verification {
+func makeVerification(supLink *state.SupLink, checkpoint *state.Checkpoint, pubKey string, validatorOrder int) *Verification {
        return &Verification{
                SourceHash:   supLink.SourceHash,
                TargetHash:   checkpoint.Hash,
                SourceHeight: supLink.SourceHeight,
                TargetHeight: checkpoint.Height,
-               Signature:    supLink.Signatures[pubKey],
+               Signature:    supLink.Signatures[validatorOrder],
                PubKey:       pubKey,
        }
 }
diff --git a/protocol/bc/types/sup_link.go b/protocol/bc/types/sup_link.go
index 91ee288..1ece0b6 100644
@@ -11,6 +11,20 @@ import (
 // SupLinks is alias of SupLink slice
 type SupLinks []*SupLink
 
+// AddSupLink adds the given validator's signature to the supLink with the specified source, creating the supLink if it does not exist yet
+func (s *SupLinks) AddSupLink(sourceHeight uint64, sourceHash bc.Hash, signature []byte, validatorOrder int) {
+       for _, supLink := range *s {
+               if supLink.SourceHash == sourceHash {
+                       supLink.Signatures[validatorOrder] = signature
+                       return
+               }
+       }
+
+       supLink := &SupLink{SourceHeight: sourceHeight, SourceHash: sourceHash}
+       supLink.Signatures[validatorOrder] = signature
+       *s = append(*s, supLink)
+}
+
 func (s *SupLinks) readFrom(r *blockchain.Reader) (err error) {
        size, err := blockchain.ReadVarint31(r)
        if err != nil {
diff --git a/protocol/block.go b/protocol/block.go
index 8375d3b..db4f460 100644
@@ -91,8 +91,10 @@ func (c *Chain) connectBlock(block *types.Block) (err error) {
                return err
        }
 
-       if err := c.broadcastVerification(verification); err != nil {
-               return err
+       if verification != nil {
+               if err := c.broadcastVerification(verification); err != nil {
+                       return err
+               }
        }
 
        contractView := state.NewContractViewpoint()
diff --git a/protocol/casper.go b/protocol/casper.go
index 492e8c1..ed265d2 100644
@@ -70,6 +70,10 @@ func (c *Casper) BestChain() (uint64, bc.Hash) {
        c.mu.RLock()
        defer c.mu.RUnlock()
 
+       return c.bestChain()
+}
+
+func (c *Casper) bestChain() (uint64, bc.Hash) {
        // root is init justified
        root := c.tree.checkpoint
        bestHeight, bestHash, _ := chainOfMaxJustifiedHeight(c.tree, root.Height)
@@ -87,18 +91,31 @@ func (c *Casper) LastFinalized() (uint64, bc.Hash) {
 
 // Validators return the validators by specified block hash
 // e.g. if the block num of epoch is 100, and the block height corresponding to the block hash is 130, then will return the voting results of height in 0~100
-func (c *Casper) Validators(blockHash *bc.Hash) ([]*state.Validator, error) {
+func (c *Casper) Validators(blockHash *bc.Hash) (map[string]*state.Validator, error) {
+       checkpoint, err := c.prevCheckpoint(blockHash)
+       if err != nil {
+               return nil, err
+       }
+
+       return checkpoint.Validators(), nil
+}
+
+func (c *Casper) prevCheckpoint(blockHash *bc.Hash) (*state.Checkpoint, error) {
        hash, err := c.prevCheckpointHash(blockHash)
        if err != nil {
                return nil, err
        }
 
-       checkpoint, err := c.store.GetCheckpoint(hash)
+       return c.store.GetCheckpoint(hash)
+}
+
+func (c *Casper) prevCheckpointByPrevHash(prevBlockHash *bc.Hash) (*state.Checkpoint, error) {
+       hash, err := c.prevCheckpointHashByPrevHash(prevBlockHash)
        if err != nil {
                return nil, err
        }
 
-       return checkpoint.Validators(), nil
+       return c.store.GetCheckpoint(hash)
 }
 
 // EvilValidator represent a validator who broadcast two distinct verification that violate the commandment
@@ -140,37 +157,43 @@ func chainOfMaxJustifiedHeight(node *treeNode, justifiedHeight uint64) (uint64,
        return bestHeight, bestHash, maxJustifiedHeight
 }
 
-func isValidator(pubKey string, validators []*state.Validator) bool {
-       for _, v := range validators {
-               if v.PubKey == pubKey {
-                       return true
-               }
-       }
-       return false
-}
-
 func (c *Casper) prevCheckpointHash(blockHash *bc.Hash) (*bc.Hash, error) {
        if data, ok := c.prevCheckpointCache.Get(*blockHash); ok {
                return data.(*bc.Hash), nil
        }
 
-       for {
-               block, err := c.store.GetBlockHeader(blockHash)
-               if err != nil {
-                       return nil, err
-               }
+       block, err := c.store.GetBlockHeader(blockHash)
+       if err != nil {
+               return nil, err
+       }
 
-               prevHeight, prevHash := block.Height-1, block.PreviousBlockHash
+       result, err := c.prevCheckpointHashByPrevHash(&block.PreviousBlockHash)
+       if err != nil {
+               return nil, err
+       }
+
+       c.prevCheckpointCache.Add(blockHash, result)
+       return result, nil
+}
+
+func (c *Casper) prevCheckpointHashByPrevHash(prevBlockHash *bc.Hash) (*bc.Hash, error) {
+       prevHash := prevBlockHash
+       for {
                if data, ok := c.prevCheckpointCache.Get(prevHash); ok {
-                       c.prevCheckpointCache.Add(blockHash, data)
+                       c.prevCheckpointCache.Add(prevBlockHash, data)
                        return data.(*bc.Hash), nil
                }
 
-               if prevHeight%state.BlocksOfEpoch == 0 {
-                       c.prevCheckpointCache.Add(blockHash, &prevHash)
-                       return &prevHash, nil
+               prevBlock, err := c.store.GetBlockHeader(prevHash)
+               if err != nil {
+                       return nil, err
+               }
+
+               if prevBlock.Height%state.BlocksOfEpoch == 0 {
+                       c.prevCheckpointCache.Add(prevBlockHash, prevHash)
+                       return prevHash, nil
                }
 
-               blockHash = &prevHash
+               prevHash = &prevBlock.PreviousBlockHash
        }
 }
diff --git a/protocol/protocol.go b/protocol/protocol.go
index b01b73a..aac4bb3 100644
@@ -6,6 +6,8 @@ import (
        log "github.com/sirupsen/logrus"
 
        "github.com/bytom/bytom/config"
+       "github.com/bytom/bytom/consensus"
+       "github.com/bytom/bytom/errors"
        "github.com/bytom/bytom/event"
        "github.com/bytom/bytom/protocol/bc"
        "github.com/bytom/bytom/protocol/bc/types"
@@ -30,13 +32,14 @@ type Chain struct {
 }
 
 // NewChain returns a new Chain using store as the underlying storage.
-func NewChain(store Store, txPool *TxPool) (*Chain, error) {
-       return NewChainWithOrphanManage(store, txPool, NewOrphanManage())
+func NewChain(store Store, txPool *TxPool, eventDispatcher *event.Dispatcher) (*Chain, error) {
+       return NewChainWithOrphanManage(store, txPool, NewOrphanManage(), eventDispatcher)
 }
 
-func NewChainWithOrphanManage(store Store, txPool *TxPool, manage *OrphanManage) (*Chain, error) {
+func NewChainWithOrphanManage(store Store, txPool *TxPool, manage *OrphanManage, eventDispatcher *event.Dispatcher) (*Chain, error) {
        c := &Chain{
                orphanManage:     manage,
+               eventDispatcher:  eventDispatcher,
                txPool:           txPool,
                store:            store,
                rollbackNotifyCh: make(chan interface{}),
@@ -77,10 +80,10 @@ func (c *Chain) initChainStatus() error {
        }
 
        checkpoint := &state.Checkpoint{
-               Height:         0,
-               Hash:           genesisBlock.Hash(),
-               StartTimestamp: genesisBlock.Timestamp,
-               Status:         state.Justified,
+               Height:    0,
+               Hash:      genesisBlock.Hash(),
+               Timestamp: genesisBlock.Timestamp,
+               Status:    state.Justified,
        }
        if err := c.store.SaveCheckpoints(checkpoint); err != nil {
                return err
@@ -141,6 +144,33 @@ func (c *Chain) latestBestBlockHash() *bc.Hash {
        return &hash
 }
 
+// GetValidator returns the validator scheduled to propose the block after prevHash at the given timestamp
+func (c *Chain) GetValidator(prevHash *bc.Hash, timeStamp uint64) (*state.Validator, error) {
+       prevCheckpoint, err := c.casper.prevCheckpointByPrevHash(prevHash)
+       if err != nil {
+               return nil, err
+       }
+
+       validators := prevCheckpoint.Validators()
+       startTimestamp := prevCheckpoint.Timestamp + consensus.ActiveNetParams.BlockTimeInterval
+       order := getValidatorOrder(startTimestamp, timeStamp, uint64(len(validators)))
+       for _, validator := range validators {
+               if validator.Order == int(order) {
+                       return validator, nil
+               }
+       }
+       return nil, errors.New("get blocker failure")
+}
+
+func getValidatorOrder(startTimestamp, blockTimestamp, numOfConsensusNode uint64) uint64 {
+       // Length of one full round, in which every consensus node produces one epoch of blocks
+       roundBlockTime := state.BlocksOfEpoch * numOfConsensusNode * consensus.ActiveNetParams.BlockTimeInterval
+       // Start time of the round that blockTimestamp falls into
+       lastRoundStartTime := startTimestamp + (blockTimestamp-startTimestamp)/roundBlockTime*roundBlockTime
+       // The proposer's order within that round
+       return (blockTimestamp - lastRoundStartTime) / (state.BlocksOfEpoch * consensus.ActiveNetParams.BlockTimeInterval)
+}
+
 // BestBlockHeader returns the chain tail block
 func (c *Chain) BestBlockHeader() *types.BlockHeader {
        node := c.index.BestNode()
@@ -157,8 +187,6 @@ func (c *Chain) GetBlockIndex() *state.BlockIndex {
 }
 
 func (c *Chain) SignBlockHeader(blockHeader *types.BlockHeader) {
-       c.cond.L.Lock()
-       defer c.cond.L.Unlock()
        xprv := config.CommonConfig.PrivateKey()
        signature := xprv.Sign(blockHeader.Hash().Bytes())
        blockHeader.Set(signature)
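
The GetValidator/getValidatorOrder pair added earlier in this file maps a block timestamp to a proposer slot: one round is long enough for every validator to produce a full epoch of blocks, and the offset inside the current round, counted in epochs, is the proposer's order. The sketch below reproduces only that arithmetic with assumed values (100 blocks per epoch, 500ms block interval, 10 validators); the real parameters come from consensus.ActiveNetParams and are not part of this diff.

package main

import "fmt"

// Assumed illustration values; the real ones live in consensus.ActiveNetParams
// and state.BlocksOfEpoch.
const (
    blocksOfEpoch       = 100
    blockTimeIntervalMs = 500
    numOfValidators     = 10
)

// validatorOrder mirrors the arithmetic of getValidatorOrder in the diff.
func validatorOrder(startTimestamp, blockTimestamp uint64) uint64 {
    // Length of one full round: every validator produces one epoch of blocks.
    roundBlockTime := uint64(blocksOfEpoch * numOfValidators * blockTimeIntervalMs)
    // Start of the round that blockTimestamp falls into.
    lastRoundStartTime := startTimestamp + (blockTimestamp-startTimestamp)/roundBlockTime*roundBlockTime
    // The offset inside the round, measured in whole epochs, is the proposer's order.
    return (blockTimestamp - lastRoundStartTime) / (blocksOfEpoch * blockTimeIntervalMs)
}

func main() {
    start := uint64(1_600_000_000_000) // ms, assumed epoch start
    // Two full epochs (2 * 100 * 500ms) past the round start -> order 2.
    ts := start + 2*blocksOfEpoch*blockTimeIntervalMs + 1234
    fmt.Println(validatorOrder(start, ts)) // 2
}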
index f174006..c12edb1 100644 (file)
@@ -3,6 +3,7 @@ package state
 import (
        "sort"
 
+       "github.com/bytom/bytom/consensus"
        "github.com/bytom/bytom/protocol/bc"
 )
 
@@ -10,7 +11,6 @@ const (
        // BlocksOfEpoch represent the block num in one epoch
        BlocksOfEpoch   = 100
        minMortgage     = 1000000
-       numOfValidators = 10
 )
 
 // CheckpointStatus represent current status of checkpoint
@@ -35,12 +35,18 @@ const (
 type SupLink struct {
        SourceHeight uint64
        SourceHash   bc.Hash
-       Signatures   map[string]string // pubKey to signature
+       Signatures   [consensus.NumOfValidators]string
 }
 
 // IsMajority if at least 2/3 of validators have published votes with sup link
 func (s *SupLink) IsMajority() bool {
-       return len(s.Signatures) > numOfValidators*2/3
+       numOfSignatures := 0
+       for _, signature := range s.Signatures {
+               if signature != "" {
+                       numOfSignatures++
+               }
+       }
+       return numOfSignatures > consensus.NumOfValidators*2/3
 }
 
 // Checkpoint represent the block/hash under consideration for finality for a given epoch.
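
With Signatures now a fixed-size array indexed by validator order, the supermajority check reduces to counting non-empty slots. Below is a minimal sketch of that pattern with a hypothetical supLink type and an assumed cap of 10 validators (the real value is consensus.NumOfValidators).

package main

import "fmt"

const numOfValidators = 10 // assumed; the real constant is consensus.NumOfValidators

// supLink mirrors the reworked SupLink: signatures indexed by validator order.
type supLink struct {
    signatures [numOfValidators]string
}

// addVerification records a signature in the slot of the given validator order.
func (s *supLink) addVerification(order int, sig string) {
    s.signatures[order] = sig
}

// isMajority reports whether more than 2/3 of the validator slots are signed.
func (s *supLink) isMajority() bool {
    signed := 0
    for _, sig := range s.signatures {
        if sig != "" {
            signed++
        }
    }
    return signed > numOfValidators*2/3
}

func main() {
    var link supLink
    for order := 0; order < 7; order++ {
        link.addVerification(order, fmt.Sprintf("sig-%d", order))
    }
    fmt.Println(link.isMajority()) // true: 7 > 10*2/3 (== 6)
}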
@@ -52,45 +58,56 @@ type Checkpoint struct {
        Hash       bc.Hash
        ParentHash bc.Hash
        // only save in the memory, not be persisted
-       Parent         *Checkpoint `json:"-"`
-       StartTimestamp uint64
-       SupLinks       []*SupLink
-       Status         CheckpointStatus
+       Parent    *Checkpoint `json:"-"`
+       Timestamp uint64
+       SupLinks  []*SupLink  `json:"-"`
+       Status    CheckpointStatus
 
        Votes      map[string]uint64 // pubKey -> num of vote
        Guaranties map[string]uint64 // pubKey -> num of guaranty
 }
 
 // AddVerification add a valid verification to checkpoint's supLink
-func (c *Checkpoint) AddVerification(sourceHash bc.Hash, sourceHeight uint64, pubKey, signature string) *SupLink {
+func (c *Checkpoint) AddVerification(sourceHash bc.Hash, sourceHeight uint64, validatorOrder int, signature string) *SupLink {
        for _, supLink := range c.SupLinks {
                if supLink.SourceHash == sourceHash {
-                       supLink.Signatures[pubKey] = signature
+                       supLink.Signatures[validatorOrder] = signature
                        return supLink
                }
        }
        supLink := &SupLink{
                SourceHeight: sourceHeight,
                SourceHash:   sourceHash,
-               Signatures:   map[string]string{pubKey: signature},
        }
+       supLink.Signatures[validatorOrder] = signature
        c.SupLinks = append(c.SupLinks, supLink)
        return supLink
 }
 
+// ContainsVerification reports whether the specified validator has already added a verification for the given source hash to this checkpoint
+func (c *Checkpoint) ContainsVerification(sourceHash bc.Hash, validatorOrder int) bool {
+       for _, supLink := range c.SupLinks {
+               if supLink.SourceHash == sourceHash && supLink.Signatures[validatorOrder] != "" {
+                       return true
+               }
+       }
+       return false
+}
+
 // Validator represent the participants of the PoS network
 // Responsible for block generation and verification
 type Validator struct {
        PubKey   string
+       Order    int
        Vote     uint64
        Guaranty uint64
 }
 
 // Validators return next epoch of validators, if the status of checkpoint is growing, return empty
-func (c *Checkpoint) Validators() []*Validator {
+func (c *Checkpoint) Validators() map[string]*Validator {
        var validators []*Validator
        if c.Status == Growing {
-               return validators
+               return nil
        }
 
        for pubKey, mortgageNum := range c.Guaranties {
@@ -107,9 +124,12 @@ func (c *Checkpoint) Validators() []*Validator {
                return validators[i].Guaranty+validators[i].Vote > validators[j].Guaranty+validators[j].Vote
        })
 
-       end := numOfValidators
-       if len(validators) < numOfValidators {
-               end = len(validators)
+       result := make(map[string]*Validator)
+       for i := 0; i < len(validators) && i < consensus.NumOfValidators; i++ {
+               validator := validators[i]
+               validator.Order = i
+               result[validator.PubKey] = validator
        }
-       return validators[:end]
+
+       return result
 }
index e87180f..09dd762 100644 (file)
@@ -25,6 +25,7 @@ type Store interface {
 
        LoadBlockIndex(uint64) (*state.BlockIndex, error)
        SaveBlock(*types.Block) error
+       SaveBlockHeader(*types.BlockHeader) error
        SaveChainStatus(*state.BlockNode, *state.UtxoViewpoint, *state.ContractViewpoint, uint64, *bc.Hash) error
 }
 
index ac3da93..3d0dd83 100644 (file)
@@ -25,6 +25,7 @@ func makeTree(root *state.Checkpoint, successors []*state.Checkpoint) *treeNode
                node := nodes[0]
                for _, successor := range parentToSuccessors[node.checkpoint.Hash] {
                        child := &treeNode{checkpoint: successor}
+                       successor.Parent = node.checkpoint
                        node.children = append(node.children, child)
                        nodes = append(nodes, child)
                }
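
The added line wires each successor checkpoint back to its parent while the tree is built breadth-first. A minimal sketch of the same pattern, using hypothetical checkpoint/treeNode stand-ins rather than the real state.Checkpoint:

package main

import "fmt"

// checkpoint is a stand-in for state.Checkpoint: only a hash and a parent link.
type checkpoint struct {
    hash   string
    parent *checkpoint
}

type treeNode struct {
    checkpoint *checkpoint
    children   []*treeNode
}

// makeTree builds the tree breadth-first and, as in the diff, sets each
// successor's parent pointer to the checkpoint it hangs off.
func makeTree(root *checkpoint, parentToSuccessors map[string][]*checkpoint) *treeNode {
    rootNode := &treeNode{checkpoint: root}
    nodes := []*treeNode{rootNode}
    for len(nodes) != 0 {
        node := nodes[0]
        for _, successor := range parentToSuccessors[node.checkpoint.hash] {
            successor.parent = node.checkpoint
            child := &treeNode{checkpoint: successor}
            node.children = append(node.children, child)
            nodes = append(nodes, child)
        }
        nodes = nodes[1:]
    }
    return rootNode
}

func main() {
    root := &checkpoint{hash: "a"}
    b := &checkpoint{hash: "b"}
    c := &checkpoint{hash: "c"}
    makeTree(root, map[string][]*checkpoint{"a": {b}, "b": {c}})
    fmt.Println(b.parent.hash, c.parent.hash) // a b
}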
index 9b4773b..0e37af7 100644 (file)
@@ -110,6 +110,7 @@ func (s *mockStore) GetUtxo(*bc.Hash) (*storage.UtxoEntry, error)
 func (s *mockStore) GetContract(hash [32]byte) ([]byte, error)                    { return nil, nil }
 func (s *mockStore) LoadBlockIndex(uint64) (*state.BlockIndex, error)             { return nil, nil }
 func (s *mockStore) SaveBlock(*types.Block) error                                 { return nil }
+func (s *mockStore) SaveBlockHeader(*types.BlockHeader) error                     { return nil }
 func (s *mockStore) SaveChainStatus(*state.BlockNode, *state.UtxoViewpoint, *state.ContractViewpoint, uint64, *bc.Hash) error {
        return nil
 }
@@ -611,7 +612,8 @@ func (s *mockStore1) GetTransactionsUtxo(utxoView *state.UtxoViewpoint, tx []*bc
 func (s *mockStore1) GetUtxo(*bc.Hash) (*storage.UtxoEntry, error)        { return nil, nil }
 func (s *mockStore1) GetContract(hash [32]byte) ([]byte, error)           { return nil, nil }
 func (s *mockStore1) LoadBlockIndex(uint64) (*state.BlockIndex, error)    { return nil, nil }
-func (s *mockStore1) SaveBlock(*types.Block) error { return nil }
+func (s *mockStore1) SaveBlock(*types.Block) error                        { return nil }
+func (s *mockStore1) SaveBlockHeader(*types.BlockHeader) error            { return nil }
 func (s *mockStore1) SaveChainStatus(*state.BlockNode, *state.UtxoViewpoint, *state.ContractViewpoint, uint64, *bc.Hash) error { return nil}
 
 func TestProcessTransaction(t *testing.T) {
index 59b1411..5d9bc10 100644 (file)
@@ -24,14 +24,15 @@ var (
        errVersionRegression     = errors.New("version regression")
 )
 
-func checkBlockTime(b *bc.Block, parent *state.BlockNode) error {
-       if b.Timestamp > uint64(time.Now().Unix())+consensus.MaxTimeOffsetSeconds {
+func checkBlockTime(b *bc.Block, parent *types.BlockHeader) error {
+       now := uint64(time.Now().UnixNano() / 1e6)
+       if b.Timestamp < (parent.Timestamp + consensus.ActiveNetParams.BlockTimeInterval) {
                return errBadTimestamp
        }
-
-       if b.Timestamp <= parent.CalcPastMedianTime() {
+       if b.Timestamp > (now + consensus.ActiveNetParams.MaxTimeOffsetMs) {
                return errBadTimestamp
        }
+
        return nil
 }
 
@@ -69,7 +70,7 @@ func ValidateBlockHeader(b *bc.Block, parent *state.BlockNode) error {
                return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", parent.Hash.Bytes(), b.PreviousBlockId.Bytes())
        }
 
-       if err := checkBlockTime(b, parent); err != nil {
+       if err := checkBlockTime(b, parent.BlockHeader()); err != nil {
                return err
        }
        return nil
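
checkBlockTime now works in milliseconds and enforces two bounds: a block must be at least one BlockTimeInterval after its parent and no more than MaxTimeOffsetMs ahead of local time. The sketch below mirrors that rule with assumed values (500ms interval, 3s max offset); the real limits live in consensus.ActiveNetParams.

package main

import (
    "errors"
    "fmt"
    "time"
)

// Assumed illustration values; the real ones come from consensus.ActiveNetParams.
const (
    blockTimeIntervalMs = 500
    maxTimeOffsetMs     = 3000
)

var errBadTimestamp = errors.New("block timestamp is not in the valid range")

// checkBlockTime mirrors the reworked rule: at least one block interval after
// the parent, and no further than maxTimeOffsetMs ahead of local time (ms).
func checkBlockTime(blockTs, parentTs uint64) error {
    now := uint64(time.Now().UnixNano() / int64(time.Millisecond))
    if blockTs < parentTs+blockTimeIntervalMs {
        return errBadTimestamp
    }
    if blockTs > now+maxTimeOffsetMs {
        return errBadTimestamp
    }
    return nil
}

func main() {
    parent := uint64(time.Now().UnixNano()/int64(time.Millisecond)) - 1000
    fmt.Println(checkBlockTime(parent+blockTimeIntervalMs, parent)) // <nil>
    fmt.Println(checkBlockTime(parent+100, parent))                 // errBadTimestamp: too close to parent
}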
index 0ea31e4..0c86f4c 100644 (file)
@@ -20,31 +20,31 @@ func TestCheckBlockTime(t *testing.T) {
                err        error
        }{
                {
-                       blockTime:  1520000001,
+                       blockTime:  1520006000,
                        parentTime: []uint64{1520000000},
                        err:        nil,
                },
                {
-                       desc:       "timestamp less than past median time (blocktest#1005)",
-                       blockTime:  1510000094,
-                       parentTime: []uint64{1520000000, 1510000099, 1510000098, 1510000097, 1510000096, 1510000095, 1510000094, 1510000093, 1510000092, 1510000091, 1510000090},
-                       err:        errBadTimestamp,
+                       desc:       "timestamp less than past median time",
+                       blockTime:  1520006000,
+                       parentTime: []uint64{1520000000, 1520000500, 1520001000, 1520001500, 1520002000, 1520002500, 1520003000, 1520003500, 1520004000, 1520004500, 1520005000},
+                       err:        nil,
                },
                {
-                       desc:       "timestamp greater than max limit (blocktest#1006)",
-                       blockTime:  9999999999,
-                       parentTime: []uint64{1520000000},
+                       desc:       "timestamp greater than max limit",
+                       blockTime:  99999999990000,
+                       parentTime: []uint64{15200000000000},
                        err:        errBadTimestamp,
                },
                {
-                       desc:       "timestamp of the block and the parent block are both greater than max limit (blocktest#1007)",
-                       blockTime:  uint64(time.Now().Unix()) + consensus.MaxTimeOffsetSeconds + 2,
-                       parentTime: []uint64{uint64(time.Now().Unix()) + consensus.MaxTimeOffsetSeconds + 1},
+                       desc:       "timestamp of the block and the parent block are both greater than max limit",
+                       blockTime:  uint64(time.Now().UnixNano()/int64(time.Millisecond)) + consensus.ActiveNetParams.MaxTimeOffsetMs + 2000,
+                       parentTime: []uint64{uint64(time.Now().UnixNano()/int64(time.Millisecond)) + consensus.ActiveNetParams.MaxTimeOffsetMs + 1000},
                        err:        errBadTimestamp,
                },
        }
 
-       parent := &state.BlockNode{Version: 1}
+       parent := &types.BlockHeader{Version: 1}
        block := &bc.Block{
                BlockHeader: &bc.BlockHeader{Version: 1},
        }
@@ -53,8 +53,9 @@ func TestCheckBlockTime(t *testing.T) {
                parent.Timestamp = c.parentTime[0]
                parentSuccessor := parent
                for i := 1; i < len(c.parentTime); i++ {
-                       parentSuccessor.Parent = &state.BlockNode{Version: 1, Timestamp: c.parentTime[i]}
-                       parentSuccessor = parentSuccessor.Parent
+                       previous := &types.BlockHeader{Version: 1, Timestamp: c.parentTime[i]}
+                       parentSuccessor.PreviousBlockHash = previous.Hash()
+                       parentSuccessor = previous
                }
 
                block.Timestamp = c.blockTime
@@ -153,7 +154,7 @@ func TestValidateBlockHeader(t *testing.T) {
                                BlockHeader: &bc.BlockHeader{
                                        Version:         1,
                                        Height:          1,
-                                       Timestamp:       1523352601,
+                                       Timestamp:       1523358600,
                                        PreviousBlockId: &bc.Hash{V0: 0},
                                },
                        },
@@ -230,7 +231,7 @@ func TestValidateBlock(t *testing.T) {
                                BlockHeader: &bc.BlockHeader{
                                        Version:          1,
                                        Height:           1,
-                                       Timestamp:        1523352601,
+                                       Timestamp:        1523358600,
                                        PreviousBlockId:  &bc.Hash{V0: 0},
                                        TransactionsRoot: &bc.Hash{V0: 1},
                                },
@@ -258,7 +259,7 @@ func TestValidateBlock(t *testing.T) {
                                BlockHeader: &bc.BlockHeader{
                                        Version:          1,
                                        Height:           1,
-                                       Timestamp:        1523352601,
+                                       Timestamp:        1523358600,
                                        PreviousBlockId:  &bc.Hash{V0: 0},
                                        TransactionsRoot: &bc.Hash{V0: 6294987741126419124, V1: 12520373106916389157, V2: 5040806596198303681, V3: 1151748423853876189},
                                },
@@ -286,7 +287,7 @@ func TestValidateBlock(t *testing.T) {
                                BlockHeader: &bc.BlockHeader{
                                        Version:         1,
                                        Height:          1,
-                                       Timestamp:       1523352601,
+                                       Timestamp:       1523358600,
                                        PreviousBlockId: &bc.Hash{V0: 0},
                                },
                                Transactions: []*bc.Tx{
@@ -337,7 +338,7 @@ func TestGasOverBlockLimit(t *testing.T) {
                BlockHeader: &bc.BlockHeader{
                        Version:          1,
                        Height:           1,
-                       Timestamp:        1523352601,
+                       Timestamp:        1523358600,
                        PreviousBlockId:  &bc.Hash{V0: 0},
                        TransactionsRoot: &bc.Hash{V0: 1},
                },
index 92cdf5d..46b19d6 100644 (file)
@@ -61,7 +61,7 @@ func (p *processBlockTestCase) Run() error {
        }
 
        txPool := protocol.NewTxPool(store, event.NewDispatcher())
-       chain, err := protocol.NewChainWithOrphanManage(store, txPool, orphanManage)
+       chain, err := protocol.NewChainWithOrphanManage(store, txPool, orphanManage, nil)
        if err != nil {
                return err
        }
index 60585bc..2da9979 100644 (file)
@@ -28,7 +28,7 @@ func MockChain(testDB dbm.DB) (*protocol.Chain, *database.Store, *protocol.TxPoo
        store := database.NewStore(testDB)
        dispatcher := event.NewDispatcher()
        txPool := protocol.NewTxPool(store, dispatcher)
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        return chain, store, txPool, err
 }
 
index 35d5402..b8a7993 100644 (file)
@@ -121,7 +121,7 @@ func TestWalletUpdate(t *testing.T) {
        dispatcher := event.NewDispatcher()
        txPool := protocol.NewTxPool(store, dispatcher)
 
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        if err != nil {
                t.Fatal(err)
        }
@@ -213,7 +213,7 @@ func TestRescanWallet(t *testing.T) {
        store := database.NewStore(testDB)
        dispatcher := event.NewDispatcher()
        txPool := protocol.NewTxPool(store, dispatcher)
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        if err != nil {
                t.Fatal(err)
        }
@@ -262,7 +262,7 @@ func TestMemPoolTxQueryLoop(t *testing.T) {
        dispatcher := event.NewDispatcher()
        txPool := protocol.NewTxPool(store, dispatcher)
 
-       chain, err := protocol.NewChain(store, txPool)
+       chain, err := protocol.NewChain(store, txPool, dispatcher)
        if err != nil {
                t.Fatal(err)
        }