return cp, json.Unmarshal(rawProgram, cp)
}
+// GetMiningAddress will return the mining address
+func (m *Manager) GetMiningAddress() (string, error) {
+	ctrlProgram, err := m.GetCoinbaseCtrlProgram()
+	if err != nil {
+		return "", err
+	}
+	return ctrlProgram.Address, nil
+}
+
+// SetMiningAddress will set the mining address
+func (m *Manager) SetMiningAddress(miningAddress string) (string, error) {
+	controlProgram, err := m.getProgramByAddress(miningAddress)
+	if err != nil {
+		return "", err
+	}
+
+	// persist the control program so the address survives restarts
+	rawCP, err := json.Marshal(&CtrlProgram{
+		Address:        miningAddress,
+		ControlProgram: controlProgram,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	m.db.Set(miningAddressKey, rawCP)
+	// read back through GetMiningAddress so the caller sees the stored value
+	return m.GetMiningAddress()
+}
+
// IsLocalControlProgram check is the input control program belong to local
func (m *Manager) IsLocalControlProgram(prog []byte) bool {
var hash common.Hash
store := database.NewStore(testDB)
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
if err != nil {
t.Fatal(err)
}
start, end := getPageRange(len(addresses), ins.From, ins.Count)
return NewSuccessResponse(addresses[start:end])
}
+
+// miningAddressResp is the response body carrying the wallet's mining address.
+// (fixes the "minig" typo in the original type name; all uses are local.)
+type miningAddressResp struct {
+	MiningAddress string `json:"mining_address"`
+}
+
+// POST /get-mining-address
+func (a *API) getMiningAddress(ctx context.Context) Response {
+	miningAddress, err := a.wallet.AccountMgr.GetMiningAddress()
+	if err != nil {
+		return NewErrorResponse(err)
+	}
+	return NewSuccessResponse(miningAddressResp{
+		MiningAddress: miningAddress,
+	})
+}
+
+// POST /set-mining-address
+func (a *API) setMiningAddress(ctx context.Context, in struct {
+	MiningAddress string `json:"mining_address"`
+}) Response {
+	miningAddress, err := a.wallet.AccountMgr.SetMiningAddress(in.MiningAddress)
+	if err != nil {
+		return NewErrorResponse(err)
+	}
+	return NewSuccessResponse(miningAddressResp{
+		MiningAddress: miningAddress,
+	})
+}
"github.com/bytom/bytom/net/websocket"
"github.com/bytom/bytom/netsync/peers"
"github.com/bytom/bytom/p2p"
+ "github.com/bytom/bytom/proposal/blockproposer"
"github.com/bytom/bytom/protocol"
"github.com/bytom/bytom/wallet"
)
chain *protocol.Chain
server *http.Server
handler http.Handler
+ blockProposer *blockproposer.BlockProposer
txFeedTracker *txfeed.Tracker
notificationMgr *websocket.WSNotificationManager
eventDispatcher *event.Dispatcher
}
// NewAPI create and initialize the API
-func NewAPI(sync NetSync, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, dispatcher *event.Dispatcher, notificationMgr *websocket.WSNotificationManager) *API {
+func NewAPI(sync NetSync, wallet *wallet.Wallet, blockProposer *blockproposer.BlockProposer, txfeeds *txfeed.Tracker, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, dispatcher *event.Dispatcher, notificationMgr *websocket.WSNotificationManager) *API {
api := &API{
sync: sync,
wallet: wallet,
chain: chain,
accessTokens: token,
+ blockProposer: blockProposer,
txFeedTracker: txfeeds,
eventDispatcher: dispatcher,
notificationMgr: notificationMgr,
m.Handle("/validate-address", jsonHandler(a.validateAddress))
m.Handle("/list-pubkeys", jsonHandler(a.listPubKeys))
+ m.Handle("/get-mining-address", jsonHandler(a.getMiningAddress))
+ m.Handle("/set-mining-address", jsonHandler(a.setMiningAddress))
+
m.Handle("/create-asset", jsonHandler(a.createAsset))
m.Handle("/update-asset-alias", jsonHandler(a.updateAssetAlias))
m.Handle("/get-asset", jsonHandler(a.getAsset))
m.Handle("/get-block-header", jsonHandler(a.getBlockHeader))
m.Handle("/get-block-count", jsonHandler(a.getBlockCount))
+ m.Handle("/is-mining", jsonHandler(a.isMining))
+ m.Handle("/set-mining", jsonHandler(a.setMining))
+
m.Handle("/verify-message", jsonHandler(a.verifyMessage))
m.Handle("/compile", jsonHandler(a.compileEquity))
type NetInfo struct {
Listening bool `json:"listening"`
Syncing bool `json:"syncing"`
+ Mining bool `json:"mining"`
PeerCount int `json:"peer_count"`
CurrentBlock uint64 `json:"current_block"`
HighestBlock uint64 `json:"highest_block"`
info := &NetInfo{
Listening: a.sync.IsListening(),
Syncing: !a.sync.IsCaughtUp(),
+ Mining: a.blockProposer.IsProposing(),
PeerCount: a.sync.PeerCount(),
CurrentBlock: a.chain.BestBlockHeight(),
NetWorkID: a.sync.GetNetwork(),
return NewSuccessResponse(a.GetNodeInfo())
}
+// isMining return is in mining or not
+func (a *API) isMining() Response {
+	// local result map: key matches the JSON field used by clients
+	isMining := map[string]bool{"is_mining": a.IsMining()}
+	return NewSuccessResponse(isMining)
+}
+
+// IsMining return mining status
+func (a *API) IsMining() bool {
+	return a.blockProposer.IsProposing()
+}
+
// return the peers of current node
func (a *API) listPeers() Response {
return NewSuccessResponse(a.sync.GetPeerInfos())
--- /dev/null
+package api
+
+import (
+ "errors"
+)
+
+// setMining enables or disables block proposing according to the request body.
+// Enabling requires a mining address to already be configured.
+func (a *API) setMining(in struct {
+	IsMining bool `json:"is_mining"`
+}) Response {
+	if in.IsMining {
+		if _, err := a.wallet.AccountMgr.GetMiningAddress(); err != nil {
+			// error strings are lowercase per Go convention (staticcheck ST1005)
+			return NewErrorResponse(errors.New("mining address does not exist"))
+		}
+		return a.startMining()
+	}
+	return a.stopMining()
+}
+
+// startMining starts the block proposer and verifies it is running.
+func (a *API) startMining() Response {
+	a.blockProposer.Start()
+	if !a.IsMining() {
+		return NewErrorResponse(errors.New("failed to start mining"))
+	}
+	return NewSuccessResponse("")
+}
+
+// stopMining stops the block proposer and verifies it has stopped.
+func (a *API) stopMining() Response {
+	a.blockProposer.Stop()
+	if a.IsMining() {
+		return NewErrorResponse(errors.New("failed to stop mining"))
+	}
+	return NewSuccessResponse("")
+}
store := database.NewStore(testDB)
dispatcher := event.NewDispatcher()
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
if err != nil {
return nil, err
}
package commands
import (
+ "encoding/hex"
+ "io/ioutil"
"os"
"path"
"github.com/spf13/cobra"
cfg "github.com/bytom/bytom/config"
+ "github.com/bytom/bytom/crypto/ed25519/chainkd"
)
var initFilesCmd = &cobra.Command{
cfg.EnsureRoot(config.RootDir, "solonet")
}
+ //generate the node private key
+ keyFilePath := path.Join(config.RootDir, config.PrivateKeyFile)
+ if _, err := os.Stat(keyFilePath); os.IsNotExist(err) {
+ xprv, err := chainkd.NewXPrv(nil)
+ if err != nil {
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Fatal("fail on generate private key")
+ }
+
+ if err := ioutil.WriteFile(keyFilePath, []byte(hex.EncodeToString(xprv[:])), 0600); err != nil {
+ log.WithFields(log.Fields{"module": logModule, "err": err}).Fatal("fail on save private key")
+ }
+
+ log.WithFields(log.Fields{"pubkey": xprv.XPub()}).Info("success generate private")
+ }
+
log.WithFields(log.Fields{"module": logModule, "config": configFilePath}).Info("Initialized bytom")
}
func init() {
runNodeCmd.Flags().String("prof_laddr", config.ProfListenAddress, "Use http to profile bytomd programs")
-
+ runNodeCmd.Flags().Bool("mining", config.Mining, "Enable mining")
runNodeCmd.Flags().Bool("simd.enable", config.Simd.Enable, "Enable SIMD mechan for tensority")
runNodeCmd.Flags().Bool("auth.disable", config.Auth.Disable, "Disable rpc access authenticate")
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
+ Mining bool `mapstructure:"mining"`
+
// Database backend: leveldb | memdb
DBBackend string `mapstructure:"db_backend"`
return BaseConfig{
Moniker: "anonymous",
ProfListenAddress: "",
+ Mining: false,
DBBackend: "leveldb",
DBPath: "data",
KeysPath: "keystore",
"github.com/bytom/bytom/protocol/bc"
)
-//consensus variables
+// consensus variables
const (
// Max gas that one block contains
MaxBlockGas = uint64(10000000)
DefaultGasCredit = int64(30000)
NumOfValidators = int(10)
- //config parameter for coinbase reward
+ // config parameter for coinbase reward
CoinbasePendingBlockNumber = uint64(100)
subsidyReductionInterval = uint64(840000)
baseSubsidy = uint64(41250000000)
type CasperConfig struct {
// BlockTimeInterval, milliseconds, the block time interval for producing a block
BlockTimeInterval uint64
+
+ // MaxTimeOffsetMs represent the max number of milliseconds a block time is allowed to be ahead of the current time
+ MaxTimeOffsetMs uint64
}
// BTMAssetID is BTM's asset id, the soul asset of Bytom
Bech32HRPSegwit: "bm",
DefaultPort: "46657",
DNSSeeds: []string{"www.mainnetseed.bytom.io"},
+ CasperConfig: CasperConfig{BlockTimeInterval: 6000, MaxTimeOffsetMs: 24000},
Checkpoints: []Checkpoint{
{10000, bc.NewHash([32]byte{0x93, 0xe1, 0xeb, 0x78, 0x21, 0xd2, 0xb4, 0xad, 0x0f, 0x5b, 0x1c, 0xea, 0x82, 0xe8, 0x43, 0xad, 0x8c, 0x09, 0x9a, 0xb6, 0x5d, 0x8f, 0x70, 0xc5, 0x84, 0xca, 0xa2, 0xdd, 0xf1, 0x74, 0x65, 0x2c})},
{20000, bc.NewHash([32]byte{0x7d, 0x38, 0x61, 0xf3, 0x2c, 0xc0, 0x03, 0x81, 0xbb, 0xcd, 0x9a, 0x37, 0x6f, 0x10, 0x5d, 0xfe, 0x6f, 0xfe, 0x2d, 0xa5, 0xea, 0x88, 0xa5, 0xe3, 0x42, 0xed, 0xa1, 0x17, 0x9b, 0xa8, 0x0b, 0x7c})},
Bech32HRPSegwit: "tm",
DefaultPort: "46656",
DNSSeeds: []string{"www.testnetseed.bytom.io"},
+ CasperConfig: CasperConfig{BlockTimeInterval: 6000, MaxTimeOffsetMs: 24000},
Checkpoints: []Checkpoint{
{10303, bc.NewHash([32]byte{0x3e, 0x94, 0x5d, 0x35, 0x70, 0x30, 0xd4, 0x3b, 0x3d, 0xe3, 0xdd, 0x80, 0x67, 0x29, 0x9a, 0x5e, 0x09, 0xf9, 0xfb, 0x2b, 0xad, 0x5f, 0x92, 0xc8, 0x69, 0xd1, 0x42, 0x39, 0x74, 0x9a, 0xd1, 0x1c})},
{40000, bc.NewHash([32]byte{0x6b, 0x13, 0x9a, 0x5b, 0x76, 0x77, 0x9b, 0xd4, 0x1c, 0xec, 0x53, 0x68, 0x44, 0xbf, 0xf4, 0x48, 0x94, 0x3d, 0x16, 0xe3, 0x9b, 0x2e, 0xe8, 0xa1, 0x0f, 0xa0, 0xbc, 0x7d, 0x2b, 0x17, 0x55, 0xfc})},
var SoloNetParams = Params{
Name: "solo",
Bech32HRPSegwit: "sm",
+ CasperConfig: CasperConfig{BlockTimeInterval: 6000, MaxTimeOffsetMs: 24000},
Checkpoints: []Checkpoint{},
}
import (
"encoding/binary"
+ "encoding/hex"
"encoding/json"
"time"
log "github.com/sirupsen/logrus"
"github.com/tendermint/tmlibs/common"
+ "github.com/bytom/bytom/consensus"
dbm "github.com/bytom/bytom/database/leveldb"
"github.com/bytom/bytom/database/storage"
"github.com/bytom/bytom/errors"
return nil, err
}
+ setSupLinkToCheckpoint(checkpoint, header.SupLinks)
return checkpoint, nil
}
+// GetCheckpointsByHeight return all checkpoints stored at the specified block height
func (s *Store) GetCheckpointsByHeight(height uint64) ([]*state.Checkpoint, error) {
iter := s.db.IteratorPrefix(calcCheckpointKey(height, nil))
defer iter.Release()
- return loadCheckpointsFromIter(iter)
+ return s.loadCheckpointsFromIter(iter)
}
// CheckpointsFromNode return all checkpoints from specified block height and hash
startKey := calcCheckpointKey(height, hash)
iter := s.db.IteratorPrefixWithStart(CheckpointPrefix, startKey, false)
- finalizedCheckpoint := &state.Checkpoint{}
- if err := json.Unmarshal(iter.Value(), finalizedCheckpoint); err != nil {
+ firstCheckpoint := &state.Checkpoint{}
+ if err := json.Unmarshal(iter.Value(), firstCheckpoint); err != nil {
return nil, err
}
- checkpoints := []*state.Checkpoint{finalizedCheckpoint}
- subs, err := loadCheckpointsFromIter(iter)
+ checkpoints := []*state.Checkpoint{firstCheckpoint}
+ subs, err := s.loadCheckpointsFromIter(iter)
if err != nil {
return nil, err
}
return checkpoints, nil
}
-func loadCheckpointsFromIter(iter dbm.Iterator) ([]*state.Checkpoint, error) {
+func (s *Store) loadCheckpointsFromIter(iter dbm.Iterator) ([]*state.Checkpoint, error) {
var checkpoints []*state.Checkpoint
defer iter.Release()
for iter.Next() {
return nil, err
}
+ header, err := s.GetBlockHeader(&checkpoint.Hash)
+ if err != nil {
+ return nil, err
+ }
+
+ setSupLinkToCheckpoint(checkpoint, header.SupLinks)
checkpoints = append(checkpoints, checkpoint)
}
return checkpoints, nil
return err
}
+ if checkpoint.Height % state.BlocksOfEpoch != 1 {
+ header, err := s.GetBlockHeader(&checkpoint.Hash)
+ if err != nil {
+ return err
+ }
+
+ batch.Delete(calcCheckpointKey(header.Height-1, &header.PreviousBlockHash))
+ }
+
batch.Set(calcCheckpointKey(checkpoint.Height, &checkpoint.Hash), data)
}
batch.Write()
return nil
}
+
+func setSupLinkToCheckpoint(c *state.Checkpoint, supLinks types.SupLinks) {
+ for _, supLink := range supLinks {
+ var signatures [consensus.NumOfValidators]string
+ for i, signature := range supLink.Signatures {
+ signatures[i] = hex.EncodeToString(signature)
+ }
+
+ c.SupLinks = append(c.SupLinks, &state.SupLink{
+ SourceHeight: supLink.SourceHeight,
+ SourceHash: supLink.SourceHash,
+ Signatures: signatures,
+ })
+ }
+}
log "github.com/sirupsen/logrus"
cmn "github.com/tendermint/tmlibs/common"
browser "github.com/toqueteos/webbrowser"
+ "github.com/bytom/bytom/proposal/blockproposer"
"github.com/prometheus/prometheus/util/flock"
"github.com/bytom/bytom/accesstoken"
notificationMgr *websocket.WSNotificationManager
api *api.API
chain *protocol.Chain
+ blockProposer *blockproposer.BlockProposer
+ miningEnable bool
txfeed *txfeed.Tracker
}
dispatcher := event.NewDispatcher()
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
if err != nil {
cmn.Exit(cmn.Fmt("Failed to create chain structure: %v", err))
}
accessTokens: accessTokens,
wallet: wallet,
chain: chain,
+ miningEnable: config.Mining,
txfeed: txFeed,
notificationMgr: notificationMgr,
}
node.BaseService = *cmn.NewBaseService(nil, "Node", node)
-
+ node.blockProposer = blockproposer.NewBlockProposer(chain, accounts, dispatcher)
return node
}
}
func (n *Node) initAndstartAPIServer() {
- n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.chain, n.config, n.accessTokens, n.eventDispatcher, n.notificationMgr)
+ n.api = api.NewAPI(n.syncManager, n.wallet, n.blockProposer, n.txfeed, n.chain, n.config, n.accessTokens, n.eventDispatcher, n.notificationMgr)
listenAddr := env.String("LISTEN", n.config.ApiAddress)
env.Parse()
}
func (n *Node) OnStart() error {
+ if n.miningEnable {
+ if _, err := n.wallet.AccountMgr.GetMiningAddress(); err != nil {
+ n.miningEnable = false
+ log.Error(err)
+ } else {
+ n.blockProposer.Start()
+ }
+ }
if !n.config.VaultMode {
if err := n.syncManager.Start(); err != nil {
return err
n.notificationMgr.Shutdown()
n.notificationMgr.WaitForShutdown()
n.BaseService.OnStop()
+ if n.miningEnable {
+ n.blockProposer.Stop()
+ }
if !n.config.VaultMode {
n.syncManager.Stop()
}
import (
"bytes"
"crypto"
+ "crypto/ed25519"
"encoding/hex"
"errors"
"fmt"
func newNetwork(conn transport, ourPubkey crypto.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) {
var ourID NodeID
- copy(ourID[:], ourPubkey.([]byte)[:nodeIDBits])
+ copy(ourID[:], ourPubkey.(ed25519.PublicKey)[:nodeIDBits])
var db *nodeDB
if dbPath != "<no database>" {
package p2p
import (
- "crypto/ed25519"
"fmt"
"net"
"sync"
if !config.VaultMode {
// Create listener
l, listenAddr = GetListener(config.P2P)
- discv, err = dht.NewDiscover(config, ed25519.PrivateKey(bytes), l.ExternalAddress().Port)
+ discv, err = dht.NewDiscover(config, bytes, l.ExternalAddress().Port)
if err != nil {
return nil, err
}
}
bestBlockHeader := b.chain.BestBlockHeader()
+ bestBlockHash := bestBlockHeader.Hash()
now := uint64(time.Now().UnixNano() / 1e6)
base := now
nextBlockTime += consensus.ActiveNetParams.BlockTimeInterval
}
- //TODO: get proposer by block hash and timestamp
- var proposer string
+ validator, err := b.chain.GetValidator(&bestBlockHash, nextBlockTime)
+ if err != nil {
+ log.WithFields(log.Fields{"module": logModule, "error": err, "pubKey": xpubStr}).Error("fail on check is next blocker")
+ continue
+ }
- if xpubStr != proposer {
+ if xpubStr != validator.PubKey {
continue
}
warnDuration := time.Duration(consensus.ActiveNetParams.BlockTimeInterval*warnTimeNum/warnTimeDenom) * time.Millisecond
criticalDuration := time.Duration(consensus.ActiveNetParams.BlockTimeInterval*criticalTimeNum/criticalTimeDenom) * time.Millisecond
- block, err := proposal.NewBlockTemplate(b.chain, b.accountManager, nextBlockTime, warnDuration, criticalDuration)
+ block, err := proposal.NewBlockTemplate(b.chain, validator, b.accountManager, nextBlockTime, warnDuration, criticalDuration)
if err != nil {
log.WithFields(log.Fields{"module": logModule, "error": err}).Error("failed on create NewBlockTemplate")
continue
)
// NewBlockTemplate returns a new block template that is ready to be solved
-func NewBlockTemplate(chain *protocol.Chain, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) (*types.Block, error) {
- builder := newBlockBuilder(chain, accountManager, timestamp, warnDuration, criticalDuration)
+func NewBlockTemplate(chain *protocol.Chain, validator *state.Validator, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) (*types.Block, error) {
+ builder := newBlockBuilder(chain, validator, accountManager, timestamp, warnDuration, criticalDuration)
return builder.build()
}
type blockBuilder struct {
chain *protocol.Chain
+ validator *state.Validator
accountManager *account.Manager
block *types.Block
gasLeft int64
}
-func newBlockBuilder(chain *protocol.Chain, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) *blockBuilder {
+func newBlockBuilder(chain *protocol.Chain, validator *state.Validator, accountManager *account.Manager, timestamp uint64, warnDuration, criticalDuration time.Duration) *blockBuilder {
preBlockHeader := chain.BestBlockHeader()
block := &types.Block{
BlockHeader: types.BlockHeader{
builder := &blockBuilder{
chain: chain,
+ validator: validator,
accountManager: accountManager,
block: block,
utxoView: state.NewUtxoViewpoint(),
}
func (b *blockBuilder) build() (*types.Block, error) {
- if err := b.applyCoinbaseTransaction(); err != nil {
+ b.block.Transactions = []*types.Tx{nil}
+ feeAmount, err := b.applyTransactionFromPool()
+ if err != nil {
return nil, err
}
- if err := b.applyTransactionFromPool(); err != nil {
+ if err := b.applyCoinbaseTransaction(feeAmount); err != nil {
return nil, err
}
return nil, err
}
- b.chain.SignBlockHeader(&b.block.BlockHeader)
+ blockHeader := &b.block.BlockHeader
+ b.chain.SignBlockHeader(blockHeader)
return b.block, nil
}
-func (b *blockBuilder) applyCoinbaseTransaction() error {
- coinbaseTx, err := b.createCoinbaseTx()
+func (b *blockBuilder) applyCoinbaseTransaction(feeAmount uint64) error {
+ coinbaseTx, err := b.createCoinbaseTx(feeAmount)
if err != nil {
return errors.Wrap(err, "fail on create coinbase tx")
}
return err
}
- b.block.Transactions = append(b.block.Transactions, coinbaseTx)
+ b.block.Transactions[0] = coinbaseTx
b.gasLeft -= gasState.GasUsed
return nil
}
-func (b *blockBuilder) applyTransactionFromPool() error {
+func (b *blockBuilder) applyTransactionFromPool() (uint64, error) {
txDescList := b.chain.GetTxPool().GetTransactions()
sort.Sort(byTime(txDescList))
-
- poolTxs := make([]*types.Tx, len(txDescList))
- for i, txDesc := range txDescList {
- poolTxs[i] = txDesc.Tx
- }
-
- return b.applyTransactions(poolTxs, timeoutWarn)
+ return b.applyTransactions(txDescList, timeoutWarn)
}
func (b *blockBuilder) calculateBlockCommitment() (err error) {
// createCoinbaseTx returns a coinbase transaction paying an appropriate subsidy
// based on the passed block height to the provided address. When the address
// is nil, the coinbase transaction will instead be redeemable by anyone.
-func (b *blockBuilder) createCoinbaseTx() (tx *types.Tx, err error) {
+func (b *blockBuilder) createCoinbaseTx(feeAmount uint64) (tx *types.Tx, err error) {
arbitrary := append([]byte{0x00}, []byte(strconv.FormatUint(b.block.Height, 10))...)
var script []byte
if b.accountManager == nil {
return nil, err
}
- if err = builder.AddOutput(types.NewOriginalTxOutput(*consensus.BTMAssetID, 0, script, [][]byte{})); err != nil {
+ coinbaseAmount := consensus.BlockSubsidy(b.block.Height)
+ if err = builder.AddOutput(types.NewOriginalTxOutput(*consensus.BTMAssetID, coinbaseAmount + feeAmount, script, [][]byte{})); err != nil {
return nil, err
}
//TODO: calculate reward to proposer
return tx, nil
}
-func (b *blockBuilder) applyTransactions(txs []*types.Tx, timeoutStatus uint8) error {
+func (b *blockBuilder) applyTransactions(txs []*protocol.TxDesc, timeoutStatus uint8) (uint64, error) {
+ var feeAmount uint64
batchTxs := []*types.Tx{}
for i := 0; i < len(txs); i++ {
- if batchTxs = append(batchTxs, txs[i]); len(batchTxs) < batchApplyNum && i != len(txs)-1 {
+ if batchTxs = append(batchTxs, txs[i].Tx); len(batchTxs) < batchApplyNum && i != len(txs)-1 {
continue
}
b.gasLeft = gasLeft
batchTxs = batchTxs[:0]
+ feeAmount += txs[i].Fee
if b.getTimeoutStatus() >= timeoutStatus || len(b.block.Transactions) > softMaxTxNum {
break
}
}
- return nil
+ return feeAmount, nil
}
type validateTxResult struct {
c.newEpochCh <- block.Hash()
}
- return c.myVerification(target, validators)
+ return c.applyMyVerification(target, validators)
}
func (c *Casper) applyBlockToCheckpoint(block *types.Block) (*state.Checkpoint, error) {
if mod := block.Height % state.BlocksOfEpoch; mod == 1 {
parent := checkpoint
checkpoint = &state.Checkpoint{
- ParentHash: parent.Hash,
- Parent: parent,
- StartTimestamp: block.Timestamp,
- Status: state.Growing,
- Votes: make(map[string]uint64),
- Guaranties: make(map[string]uint64),
+ ParentHash: parent.Hash,
+ Parent: parent,
+ Status: state.Growing,
+ Votes: make(map[string]uint64),
+ Guaranties: make(map[string]uint64),
}
node.children = append(node.children, &treeNode{checkpoint: checkpoint})
} else if mod == 0 {
checkpoint.Height = block.Height
checkpoint.Hash = block.Hash()
- return checkpoint, nil
+ checkpoint.Timestamp = block.Timestamp
+ return checkpoint, c.store.SaveCheckpoints(checkpoint)
}
func (c *Casper) applyTransactions(target *state.Checkpoint, transactions []*types.Tx) error {
}
// applySupLinks copy the block's supLink to the checkpoint
-func (c *Casper) applySupLinks(target *state.Checkpoint, supLinks []*types.SupLink, validators []*state.Validator) error {
+func (c *Casper) applySupLinks(target *state.Checkpoint, supLinks []*types.SupLink, validators map[string]*state.Validator) error {
if target.Height%state.BlocksOfEpoch != 0 {
return nil
}
for _, supLink := range supLinks {
- for _, verification := range supLinkToVerifications(supLink, validators, target.Hash, target.Height) {
- if err := c.verifyVerification(verification, true); err == nil {
- if err := c.addVerificationToCheckpoint(target, verification); err != nil {
- return err
- }
+ var validVerifications []*Verification
+ for _, v := range supLinkToVerifications(supLink, validators, target.Hash, target.Height) {
+ if validate(v) == nil && c.verifyVerification(v, validators[v.PubKey].Order, true) == nil {
+ validVerifications = append(validVerifications, v)
}
}
+ if err := c.addVerificationToCheckpoint(target, validators, validVerifications...); err != nil {
+ return err
+ }
}
return nil
}
-func (c *Casper) myVerification(target *state.Checkpoint, validators []*state.Validator) (*Verification, error) {
+func (c *Casper) applyMyVerification(target *state.Checkpoint, validators map[string]*state.Validator) (*Verification, error) {
+ if target.Height%state.BlocksOfEpoch != 0 {
+ return nil, nil
+ }
+
pubKey := config.CommonConfig.PrivateKey().XPub().String()
- if !isValidator(pubKey, validators) {
+ if _, ok := validators[pubKey]; !ok {
return nil, nil
}
+ validatorOrder := validators[pubKey].Order
+ v, err := c.myVerification(target, validatorOrder)
+ if err != nil {
+ return nil, err
+ }
+
+ if v == nil {
+ return nil, nil
+ }
+
+ if err := c.addVerificationToCheckpoint(target, validators, v); err != nil {
+ return nil, err
+ }
+
+ return v, c.saveVerificationToHeader(v, validatorOrder)
+}
+
+// myVerification builds this node's own Verification for target (signed vote from
+// the last justified checkpoint on target's branch), or nil if already verified.
+func (c *Casper) myVerification(target *state.Checkpoint, validatorOrder int) (*Verification, error) {
source := c.lastJustifiedCheckpointOfBranch(target)
+ // NOTE(review): source may be nil (it is checked below at `if source != nil`),
+ // yet source.Hash is dereferenced here first — possible nil-pointer panic.
+ // Confirm lastJustifiedCheckpointOfBranch can return nil and reorder if so.
+ if target.ContainsVerification(source.Hash, validatorOrder) {
+ return nil, nil
+ }
+
+ pubKey := config.CommonConfig.PrivateKey().XPub().String()
if source != nil {
v := &Verification{
SourceHash: source.Hash,
return nil, err
}
- if err := c.verifyVerification(v, false); err != nil {
+ if err := c.verifyVerification(v, validatorOrder,false); err != nil {
return nil, nil
}
- return v, c.addVerificationToCheckpoint(target, v)
+ return v, nil
}
return nil, nil
}
case state.Justified:
return parent
}
+ parent = parent.Parent
}
return nil
}
-func supLinkToVerifications(supLink *types.SupLink, validators []*state.Validator, targetHash bc.Hash, targetHeight uint64) []*Verification {
+func supLinkToVerifications(supLink *types.SupLink, validators map[string]*state.Validator, targetHash bc.Hash, targetHeight uint64) []*Verification {
+ validatorList := make([]*state.Validator, len(validators))
+ for _, validator := range validators {
+ validatorList[validator.Order] = validator
+ }
+
var result []*Verification
for i, signature := range supLink.Signatures {
result = append(result, &Verification{
SourceHeight: supLink.SourceHeight,
TargetHeight: targetHeight,
Signature: hex.EncodeToString(signature),
- PubKey: validators[i].PubKey,
+ PubKey: validatorList[i].PubKey,
})
}
return result
package protocol
import (
+ "encoding/hex"
"fmt"
log "github.com/sirupsen/logrus"
return err
}
+ target, err := c.store.GetCheckpoint(&v.TargetHash)
+ if err != nil {
+ c.verificationCache.Add(verificationCacheKey(v.TargetHash, v.PubKey), v)
+ return nil
+ }
+
validators, err := c.Validators(&v.TargetHash)
if err != nil {
return err
}
- if !isValidator(v.PubKey, validators) {
+ if _, ok := validators[v.PubKey]; !ok {
return errPubKeyIsNotValidator
}
+ if target.ContainsVerification(v.SourceHash, validators[v.PubKey].Order) {
+ return nil
+ }
+
c.mu.Lock()
defer c.mu.Unlock()
return nil
}
- return c.authVerification(v)
+ return c.authVerification(v, target, validators)
}
-func (c *Casper) authVerification(v *Verification) error {
- target, err := c.store.GetCheckpoint(&v.TargetHash)
- if err != nil {
- c.verificationCache.Add(verificationCacheKey(v.TargetHash, v.PubKey), v)
- return nil
+func (c *Casper) authVerification(v *Verification, target *state.Checkpoint, validators map[string]*state.Validator) error {
+ validator := validators[v.PubKey]
+ if err := c.verifyVerification(v, validator.Order, true); err != nil {
+ return err
}
- if err := c.verifyVerification(v, true); err != nil {
+ if err := c.addVerificationToCheckpoint(target, validators, v); err != nil {
return err
}
- return c.addVerificationToCheckpoint(target, v)
+ return c.saveVerificationToHeader(v, validator.Order)
}
-func (c *Casper) addVerificationToCheckpoint(target *state.Checkpoint, v *Verification) error {
- source, err := c.store.GetCheckpoint(&v.SourceHash)
- if err != nil {
- return err
- }
+func (c *Casper) addVerificationToCheckpoint(target *state.Checkpoint, validators map[string]*state.Validator, verifications ...*Verification) error {
+ _, oldBestHash := c.bestChain()
+ var affectedCheckpoints []*state.Checkpoint
+ for _, v := range verifications {
+ source, err := c.store.GetCheckpoint(&v.SourceHash)
+ if err != nil {
+ return err
+ }
- supLink := target.AddVerification(v.SourceHash, v.SourceHeight, v.PubKey, v.Signature)
- if target.Status != state.Unjustified || !supLink.IsMajority() || source.Status == state.Finalized {
- return nil
- }
+ supLink := target.AddVerification(v.SourceHash, v.SourceHeight, validators[v.PubKey].Order, v.Signature)
+ if target.Status != state.Unjustified || !supLink.IsMajority() || source.Status == state.Finalized {
+ continue
+ }
- if source.Status == state.Unjustified {
- c.justifyingCheckpoints[source.Hash] = append(c.justifyingCheckpoints[source.Hash], target)
- return nil
+ if source.Status == state.Unjustified {
+ c.justifyingCheckpoints[source.Hash] = append(c.justifyingCheckpoints[source.Hash], target)
+ continue
+ }
+
+ affectedCheckpoints = append(affectedCheckpoints, c.setJustified(source, target)...)
}
- _, oldBestHash := c.BestChain()
- affectedCheckpoints := c.setJustified(source, target)
- _, newBestHash := c.BestChain()
+ _, newBestHash := c.bestChain()
if oldBestHash != newBestHash {
c.rollbackNotifyCh <- nil
}
return c.store.SaveCheckpoints(affectedCheckpoints...)
}
+// saveVerificationToHeader persists a verification's signature into the supLinks
+// of the target block header, keyed by the validator's order.
+func (c *Casper) saveVerificationToHeader(v *Verification, validatorOrder int) error {
+	header, err := c.store.GetBlockHeader(&v.TargetHash)
+	if err != nil {
+		return err
+	}
+
+	sig, err := hex.DecodeString(v.Signature)
+	if err != nil {
+		return err
+	}
+
+	header.SupLinks.AddSupLink(v.SourceHeight, v.SourceHash, sig, validatorOrder)
+	return c.store.SaveBlockHeader(header)
+}
+
+// source status is justified, and exist a super majority link from source to target
func (c *Casper) setJustified(source, target *state.Checkpoint) []*state.Checkpoint {
- affectedCheckpoints := make(map[bc.Hash]*state.Checkpoint)
+ var affectedCheckpoint []*state.Checkpoint
target.Status = state.Justified
- affectedCheckpoints[target.Hash] = target
// must direct child
if target.Parent.Hash == source.Hash {
c.setFinalized(source)
- affectedCheckpoints[source.Hash] = source
}
for _, checkpoint := range c.justifyingCheckpoints[target.Hash] {
- for _, c := range c.setJustified(target, checkpoint) {
- affectedCheckpoints[c.Hash] = c
- }
+ affectedCheckpoint = append(affectedCheckpoint, c.setJustified(target, checkpoint)...)
}
- delete(c.justifyingCheckpoints, target.Hash)
- var result []*state.Checkpoint
- for _, c := range affectedCheckpoints {
- result = append(result, c)
- }
- return result
+ delete(c.justifyingCheckpoints, target.Hash)
+ affectedCheckpoint = append(affectedCheckpoint, source, target)
+ return affectedCheckpoint
}
func (c *Casper) setFinalized(checkpoint *state.Checkpoint) {
continue
}
+ v := verification.(*Verification)
+ target, err := c.store.GetCheckpoint(&v.TargetHash)
+ if err != nil {
+ log.WithField("err", err).Error("get target checkpoint")
+ c.verificationCache.Remove(key)
+ continue
+ }
+
c.mu.Lock()
- if err := c.authVerification(verification.(*Verification)); err != nil {
+ if err := c.authVerification(v, target, validators); err != nil {
log.WithField("err", err).Error("auth verification in cache")
}
c.mu.Unlock()
}
}
-func (c *Casper) verifyVerification(v *Verification, trackEvilValidator bool) error {
- if err := c.verifySameHeight(v, trackEvilValidator); err != nil {
+func (c *Casper) verifyVerification(v *Verification, validatorOrder int, trackEvilValidator bool) error {
+ if err := c.verifySameHeight(v, validatorOrder, trackEvilValidator); err != nil {
return err
}
- return c.verifySpanHeight(v, trackEvilValidator)
+ return c.verifySpanHeight(v, validatorOrder, trackEvilValidator)
}
// a validator must not publish two distinct votes for the same target height
-func (c *Casper) verifySameHeight(v *Verification, trackEvilValidator bool) error {
+func (c *Casper) verifySameHeight(v *Verification, validatorOrder int, trackEvilValidator bool) error {
checkpoints, err := c.store.GetCheckpointsByHeight(v.TargetHeight)
if err != nil {
return err
for _, checkpoint := range checkpoints {
for _, supLink := range checkpoint.SupLinks {
- if _, ok := supLink.Signatures[v.PubKey]; ok && checkpoint.Hash != v.TargetHash {
+ if supLink.Signatures[validatorOrder] != "" && checkpoint.Hash != v.TargetHash {
if trackEvilValidator {
- c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey)}
+ c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey, validatorOrder)}
}
return errSameHeightInVerification
}
}
// a validator must not vote within the span of its other votes.
-func (c *Casper) verifySpanHeight(v *Verification, trackEvilValidator bool) error {
+func (c *Casper) verifySpanHeight(v *Verification, validatorOrder int, trackEvilValidator bool) error {
if c.tree.findOnlyOne(func(checkpoint *state.Checkpoint) bool {
if checkpoint.Height == v.TargetHeight {
return false
}
for _, supLink := range checkpoint.SupLinks {
- if _, ok := supLink.Signatures[v.PubKey]; ok {
+ if supLink.Signatures[validatorOrder] != "" {
if (checkpoint.Height < v.TargetHeight && supLink.SourceHeight > v.SourceHeight) ||
(checkpoint.Height > v.TargetHeight && supLink.SourceHeight < v.SourceHeight) {
if trackEvilValidator {
- c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey)}
+ c.evilValidators[v.PubKey] = []*Verification{v, makeVerification(supLink, checkpoint, v.PubKey, validatorOrder)}
}
return true
}
return nil
}
-func makeVerification(supLink *state.SupLink, checkpoint *state.Checkpoint, pubKey string) *Verification {
+func makeVerification(supLink *state.SupLink, checkpoint *state.Checkpoint, pubKey string, validatorOrder int) *Verification {
return &Verification{
SourceHash: supLink.SourceHash,
TargetHash: checkpoint.Hash,
SourceHeight: supLink.SourceHeight,
TargetHeight: checkpoint.Height,
- Signature: supLink.Signatures[pubKey],
+ Signature: supLink.Signatures[validatorOrder],
PubKey: pubKey,
}
}
// SupLinks is alias of SupLink slice
type SupLinks []*SupLink
+// AddSupLink records the signature of the validator at validatorOrder on the
+// sup link originating from the given source checkpoint, creating the sup
+// link when no existing one targets that source.
+// NOTE: the signature parameter is a string to match the element type of
+// SupLink.Signatures ([consensus.NumOfValidators]string); the original []byte
+// parameter could not be assigned to a string slot and did not compile.
+func (s *SupLinks) AddSupLink(sourceHeight uint64, sourceHash bc.Hash, signature string, validatorOrder int) {
+	for _, supLink := range *s {
+		if supLink.SourceHash == sourceHash {
+			supLink.Signatures[validatorOrder] = signature
+			return
+		}
+	}
+
+	supLink := &SupLink{SourceHeight: sourceHeight, SourceHash: sourceHash}
+	supLink.Signatures[validatorOrder] = signature
+	*s = append(*s, supLink)
+}
+
func (s *SupLinks) readFrom(r *blockchain.Reader) (err error) {
size, err := blockchain.ReadVarint31(r)
if err != nil {
return err
}
- if err := c.broadcastVerification(verification); err != nil {
- return err
+ if verification != nil {
+ if err := c.broadcastVerification(verification); err != nil {
+ return err
+ }
}
contractView := state.NewContractViewpoint()
c.mu.RLock()
defer c.mu.RUnlock()
+ return c.bestChain()
+}
+
+func (c *Casper) bestChain() (uint64, bc.Hash) {
// root is init justified
root := c.tree.checkpoint
bestHeight, bestHash, _ := chainOfMaxJustifiedHeight(c.tree, root.Height)
// Validators return the validators by specified block hash
// e.g. if the block num of epoch is 100, and the block height corresponding to the block hash is 130, then will return the voting results of height in 0~100
-func (c *Casper) Validators(blockHash *bc.Hash) ([]*state.Validator, error) {
+func (c *Casper) Validators(blockHash *bc.Hash) (map[string]*state.Validator, error) {
+ checkpoint, err := c.prevCheckpoint(blockHash)
+ if err != nil {
+ return nil, err
+ }
+
+ return checkpoint.Validators(), nil
+}
+
+func (c *Casper) prevCheckpoint(blockHash *bc.Hash) (*state.Checkpoint, error) {
hash, err := c.prevCheckpointHash(blockHash)
if err != nil {
return nil, err
}
- checkpoint, err := c.store.GetCheckpoint(hash)
+ return c.store.GetCheckpoint(hash)
+}
+
+func (c *Casper) prevCheckpointByPrevHash(prevBlockHash *bc.Hash) (*state.Checkpoint, error) {
+ hash, err := c.prevCheckpointHashByPrevHash(prevBlockHash)
if err != nil {
return nil, err
}
- return checkpoint.Validators(), nil
+ return c.store.GetCheckpoint(hash)
}
// EvilValidator represent a validator who broadcast two distinct verification that violate the commandment
return bestHeight, bestHash, maxJustifiedHeight
}
-func isValidator(pubKey string, validators []*state.Validator) bool {
- for _, v := range validators {
- if v.PubKey == pubKey {
- return true
- }
- }
- return false
-}
-
func (c *Casper) prevCheckpointHash(blockHash *bc.Hash) (*bc.Hash, error) {
if data, ok := c.prevCheckpointCache.Get(*blockHash); ok {
return data.(*bc.Hash), nil
}
- for {
- block, err := c.store.GetBlockHeader(blockHash)
- if err != nil {
- return nil, err
- }
+ block, err := c.store.GetBlockHeader(blockHash)
+ if err != nil {
+ return nil, err
+ }
- prevHeight, prevHash := block.Height-1, block.PreviousBlockHash
+ result, err := c.prevCheckpointHashByPrevHash(&block.PreviousBlockHash)
+ if err != nil {
+ return nil, err
+ }
+
+ c.prevCheckpointCache.Add(blockHash, result)
+ return result, nil
+}
+
+// prevCheckpointHashByPrevHash returns the hash of the checkpoint preceding a
+// block whose parent is prevBlockHash, i.e. the nearest block at or above
+// prevBlockHash in the ancestor chain whose height is a multiple of
+// state.BlocksOfEpoch.
+func (c *Casper) prevCheckpointHashByPrevHash(prevBlockHash *bc.Hash) (*bc.Hash, error) {
+	for prevHash := prevBlockHash; ; {
+		prevBlock, err := c.store.GetBlockHeader(prevHash)
+		if err != nil {
+			return nil, err
+		}
+
+		// Check the epoch boundary BEFORE consulting the cache: a cache entry
+		// for prevHash stores the checkpoint *before* that block, so a cache
+		// hit on a block that is itself a checkpoint would return the wrong
+		// (earlier) checkpoint.
+		if prevBlock.Height%state.BlocksOfEpoch == 0 {
+			c.prevCheckpointCache.Add(*prevBlockHash, prevHash)
+			return prevHash, nil
+		}
+
+		// the cache is keyed by hash value (see prevCheckpointHash), so the
+		// pointers must be dereferenced for both lookup and insert
+		if data, ok := c.prevCheckpointCache.Get(*prevHash); ok {
+			c.prevCheckpointCache.Add(*prevBlockHash, data)
+			return data.(*bc.Hash), nil
+		}
+
+		prevHash = &prevBlock.PreviousBlockHash
-		if data, ok := c.prevCheckpointCache.Get(prevHash); ok {
-			c.prevCheckpointCache.Add(blockHash, data)
-			return data.(*bc.Hash), nil
-		}
-		if prevHeight%state.BlocksOfEpoch == 0 {
-			c.prevCheckpointCache.Add(blockHash, &prevHash)
-			return &prevHash, nil
-		}
-		blockHash = &prevHash
	}
}
log "github.com/sirupsen/logrus"
"github.com/bytom/bytom/config"
+ "github.com/bytom/bytom/consensus"
+ "github.com/bytom/bytom/errors"
"github.com/bytom/bytom/event"
"github.com/bytom/bytom/protocol/bc"
"github.com/bytom/bytom/protocol/bc/types"
}
// NewChain returns a new Chain using store as the underlying storage.
-func NewChain(store Store, txPool *TxPool) (*Chain, error) {
- return NewChainWithOrphanManage(store, txPool, NewOrphanManage())
+func NewChain(store Store, txPool *TxPool, eventDispatcher *event.Dispatcher) (*Chain, error) {
+ return NewChainWithOrphanManage(store, txPool, NewOrphanManage(), eventDispatcher)
}
-func NewChainWithOrphanManage(store Store, txPool *TxPool, manage *OrphanManage) (*Chain, error) {
+func NewChainWithOrphanManage(store Store, txPool *TxPool, manage *OrphanManage, eventDispatcher *event.Dispatcher) (*Chain, error) {
c := &Chain{
orphanManage: manage,
+ eventDispatcher: eventDispatcher,
txPool: txPool,
store: store,
rollbackNotifyCh: make(chan interface{}),
}
checkpoint := &state.Checkpoint{
- Height: 0,
- Hash: genesisBlock.Hash(),
- StartTimestamp: genesisBlock.Timestamp,
- Status: state.Justified,
+ Height: 0,
+ Hash: genesisBlock.Hash(),
+ Timestamp: genesisBlock.Timestamp,
+ Status: state.Justified,
}
if err := c.store.SaveCheckpoints(checkpoint); err != nil {
return err
return &hash
}
+// GetValidator returns the validator scheduled to produce the block with the
+// given parent hash and millisecond timestamp.
+func (c *Chain) GetValidator(prevHash *bc.Hash, timeStamp uint64) (*state.Validator, error) {
+	prevCheckpoint, err := c.casper.prevCheckpointByPrevHash(prevHash)
+	if err != nil {
+		return nil, err
+	}
+
+	validators := prevCheckpoint.Validators()
+	// a growing checkpoint yields a nil validator map; guard here, otherwise
+	// getValidatorOrder divides by zero
+	if len(validators) == 0 {
+		return nil, errors.New("checkpoint has no validators")
+	}
+
+	startTimestamp := prevCheckpoint.Timestamp + consensus.ActiveNetParams.BlockTimeInterval
+	order := getValidatorOrder(startTimestamp, timeStamp, uint64(len(validators)))
+	for _, validator := range validators {
+		if validator.Order == int(order) {
+			return validator, nil
+		}
+	}
+	return nil, errors.New("get blocker failure")
+}
+
+// getValidatorOrder maps a block timestamp to the order of the validator
+// whose turn it is at that time; numOfConsensusNode must be non-zero.
+func getValidatorOrder(startTimestamp, blockTimestamp, numOfConsensusNode uint64) uint64 {
+	// One round of product block time for all consensus nodes
+	roundBlockTime := state.BlocksOfEpoch * numOfConsensusNode * consensus.ActiveNetParams.BlockTimeInterval
+	// The start time of the last round of product block
+	lastRoundStartTime := startTimestamp + (blockTimestamp-startTimestamp)/roundBlockTime*roundBlockTime
+	// Order of blocker
+	return (blockTimestamp - lastRoundStartTime) / (state.BlocksOfEpoch * consensus.ActiveNetParams.BlockTimeInterval)
+}
+
// BestBlockHeader returns the chain tail block
func (c *Chain) BestBlockHeader() *types.BlockHeader {
node := c.index.BestNode()
}
func (c *Chain) SignBlockHeader(blockHeader *types.BlockHeader) {
- c.cond.L.Lock()
- defer c.cond.L.Unlock()
xprv := config.CommonConfig.PrivateKey()
signature := xprv.Sign(blockHeader.Hash().Bytes())
blockHeader.Set(signature)
import (
"sort"
+ "github.com/bytom/bytom/consensus"
"github.com/bytom/bytom/protocol/bc"
)
// BlocksOfEpoch represent the block num in one epoch
BlocksOfEpoch = 100
minMortgage = 1000000
- numOfValidators = 10
)
// CheckpointStatus represent current status of checkpoint
type SupLink struct {
	SourceHeight uint64
	SourceHash   bc.Hash
-	Signatures   map[string]string // pubKey to signature
+	// Signatures is indexed by validator order; an empty string at an index
+	// means the validator with that order has not signed this sup link.
+	Signatures [consensus.NumOfValidators]string
}
// IsMajority if at least 2/3 of validators have published votes with sup link
func (s *SupLink) IsMajority() bool {
- return len(s.Signatures) > numOfValidators*2/3
+ numOfSignatures := 0
+ for _, signature := range s.Signatures {
+ if signature != "" {
+ numOfSignatures++
+ }
+ }
+ return numOfSignatures > consensus.NumOfValidators*2/3
}
// Checkpoint represent the block/hash under consideration for finality for a given epoch.
Hash bc.Hash
ParentHash bc.Hash
// only save in the memory, not be persisted
- Parent *Checkpoint `json:"-"`
- StartTimestamp uint64
- SupLinks []*SupLink
- Status CheckpointStatus
+ Parent *Checkpoint `json:"-"`
+ Timestamp uint64
+ SupLinks []*SupLink `json:"-"`
+ Status CheckpointStatus
Votes map[string]uint64 // putKey -> num of vote
Guaranties map[string]uint64 // pubKey -> num of guaranty
}
// AddVerification add a valid verification to checkpoint's supLink
-func (c *Checkpoint) AddVerification(sourceHash bc.Hash, sourceHeight uint64, pubKey, signature string) *SupLink {
+func (c *Checkpoint) AddVerification(sourceHash bc.Hash, sourceHeight uint64, validatorOrder int, signature string) *SupLink {
for _, supLink := range c.SupLinks {
if supLink.SourceHash == sourceHash {
- supLink.Signatures[pubKey] = signature
+ supLink.Signatures[validatorOrder] = signature
return supLink
}
}
supLink := &SupLink{
SourceHeight: sourceHeight,
SourceHash: sourceHash,
- Signatures: map[string]string{pubKey: signature},
}
+ supLink.Signatures[validatorOrder] = signature
c.SupLinks = append(c.SupLinks, supLink)
return supLink
}
+// ContainsVerification return whether the specified validator has add verification to current checkpoint
+func (c *Checkpoint) ContainsVerification(sourceHash bc.Hash, validatorOrder int) bool {
+	for _, link := range c.SupLinks {
+		if link.SourceHash != sourceHash {
+			continue
+		}
+		if link.Signatures[validatorOrder] != "" {
+			return true
+		}
+	}
+	return false
+}
+
// Validator represent the participants of the PoS network
// Responsible for block generation and verification
type Validator struct {
PubKey string
+ Order int
Vote uint64
Guaranty uint64
}
// Validators return next epoch of validators, if the status of checkpoint is growing, return empty
-func (c *Checkpoint) Validators() []*Validator {
+func (c *Checkpoint) Validators() map[string]*Validator {
var validators []*Validator
if c.Status == Growing {
- return validators
+ return nil
}
for pubKey, mortgageNum := range c.Guaranties {
return validators[i].Guaranty+validators[i].Vote > validators[j].Guaranty+validators[j].Vote
})
- end := numOfValidators
- if len(validators) < numOfValidators {
- end = len(validators)
+ result := make(map[string]*Validator)
+ for i := 0; i < len(validators) && i < consensus.NumOfValidators; i++ {
+ validator := validators[i]
+ validator.Order = i
+ result[validator.PubKey] = validator
}
- return validators[:end]
+
+ return result
}
LoadBlockIndex(uint64) (*state.BlockIndex, error)
SaveBlock(*types.Block) error
+ SaveBlockHeader(*types.BlockHeader) error
SaveChainStatus(*state.BlockNode, *state.UtxoViewpoint, *state.ContractViewpoint, uint64, *bc.Hash) error
}
node := nodes[0]
for _, successor := range parentToSuccessors[node.checkpoint.Hash] {
child := &treeNode{checkpoint: successor}
+ successor.Parent = node.checkpoint
node.children = append(node.children, child)
nodes = append(nodes, child)
}
func (s *mockStore) GetContract(hash [32]byte) ([]byte, error) { return nil, nil }
func (s *mockStore) LoadBlockIndex(uint64) (*state.BlockIndex, error) { return nil, nil }
func (s *mockStore) SaveBlock(*types.Block) error { return nil }
+func (s *mockStore) SaveBlockHeader(*types.BlockHeader) error { return nil }
func (s *mockStore) SaveChainStatus(*state.BlockNode, *state.UtxoViewpoint, *state.ContractViewpoint, uint64, *bc.Hash) error {
return nil
}
func (s *mockStore1) GetUtxo(*bc.Hash) (*storage.UtxoEntry, error) { return nil, nil }
func (s *mockStore1) GetContract(hash [32]byte) ([]byte, error) { return nil, nil }
func (s *mockStore1) LoadBlockIndex(uint64) (*state.BlockIndex, error) { return nil, nil }
-func (s *mockStore1) SaveBlock(*types.Block) error { return nil }
+func (s *mockStore1) SaveBlock(*types.Block) error { return nil }
+func (s *mockStore1) SaveBlockHeader(*types.BlockHeader) error { return nil }
func (s *mockStore1) SaveChainStatus(*state.BlockNode, *state.UtxoViewpoint, *state.ContractViewpoint, uint64, *bc.Hash) error { return nil}
func TestProcessTransaction(t *testing.T) {
errVersionRegression = errors.New("version regression")
)
-func checkBlockTime(b *bc.Block, parent *state.BlockNode) error {
-	if b.Timestamp > uint64(time.Now().Unix())+consensus.MaxTimeOffsetSeconds {
+// checkBlockTime validates a block's millisecond timestamp: it must be at
+// least one block interval later than its parent and no further in the
+// future than the allowed clock offset.
+func checkBlockTime(b *bc.Block, parent *types.BlockHeader) error {
+	// wall clock in milliseconds; int64(time.Millisecond) replaces the magic
+	// float constant 1e6 (same value: nanoseconds per millisecond)
+	now := uint64(time.Now().UnixNano() / int64(time.Millisecond))
+	if b.Timestamp < (parent.Timestamp + consensus.ActiveNetParams.BlockTimeInterval) {
 		return errBadTimestamp
 	}
-
-	if b.Timestamp <= parent.CalcPastMedianTime() {
+	if b.Timestamp > (now + consensus.ActiveNetParams.MaxTimeOffsetMs) {
 		return errBadTimestamp
 	}
+
 	return nil
 }
return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", parent.Hash.Bytes(), b.PreviousBlockId.Bytes())
}
- if err := checkBlockTime(b, parent); err != nil {
+ if err := checkBlockTime(b, parent.BlockHeader()); err != nil {
return err
}
return nil
err error
}{
{
- blockTime: 1520000001,
+ blockTime: 1520006000,
parentTime: []uint64{1520000000},
err: nil,
},
{
- desc: "timestamp less than past median time (blocktest#1005)",
- blockTime: 1510000094,
- parentTime: []uint64{1520000000, 1510000099, 1510000098, 1510000097, 1510000096, 1510000095, 1510000094, 1510000093, 1510000092, 1510000091, 1510000090},
- err: errBadTimestamp,
+ desc: "timestamp less than past median time",
+ blockTime: 1520006000,
+ parentTime: []uint64{1520000000, 1520000500, 1520001000, 1520001500, 1520002000, 1520002500, 1520003000, 1520003500, 1520004000, 1520004500, 1520005000},
+ err: nil,
},
{
- desc: "timestamp greater than max limit (blocktest#1006)",
- blockTime: 9999999999,
- parentTime: []uint64{1520000000},
+ desc: "timestamp greater than max limit",
+ blockTime: 99999999990000,
+ parentTime: []uint64{15200000000000},
err: errBadTimestamp,
},
{
- desc: "timestamp of the block and the parent block are both greater than max limit (blocktest#1007)",
- blockTime: uint64(time.Now().Unix()) + consensus.MaxTimeOffsetSeconds + 2,
- parentTime: []uint64{uint64(time.Now().Unix()) + consensus.MaxTimeOffsetSeconds + 1},
+ desc: "timestamp of the block and the parent block are both greater than max limit",
+ blockTime: uint64(time.Now().UnixNano()/int64(time.Millisecond)) + consensus.ActiveNetParams.MaxTimeOffsetMs + 2000,
+ parentTime: []uint64{uint64(time.Now().UnixNano()/int64(time.Millisecond)) + consensus.ActiveNetParams.MaxTimeOffsetMs + 1000},
err: errBadTimestamp,
},
}
- parent := &state.BlockNode{Version: 1}
+ parent := &types.BlockHeader{Version: 1}
block := &bc.Block{
BlockHeader: &bc.BlockHeader{Version: 1},
}
parent.Timestamp = c.parentTime[0]
parentSuccessor := parent
for i := 1; i < len(c.parentTime); i++ {
- parentSuccessor.Parent = &state.BlockNode{Version: 1, Timestamp: c.parentTime[i]}
- parentSuccessor = parentSuccessor.Parent
+ Previous := &types.BlockHeader{Version: 1, Timestamp: c.parentTime[i]}
+ parentSuccessor.PreviousBlockHash = Previous.Hash()
+ parentSuccessor = Previous
}
block.Timestamp = c.blockTime
BlockHeader: &bc.BlockHeader{
Version: 1,
Height: 1,
- Timestamp: 1523352601,
+ Timestamp: 1523358600,
PreviousBlockId: &bc.Hash{V0: 0},
},
},
BlockHeader: &bc.BlockHeader{
Version: 1,
Height: 1,
- Timestamp: 1523352601,
+ Timestamp: 1523358600,
PreviousBlockId: &bc.Hash{V0: 0},
TransactionsRoot: &bc.Hash{V0: 1},
},
BlockHeader: &bc.BlockHeader{
Version: 1,
Height: 1,
- Timestamp: 1523352601,
+ Timestamp: 1523358600,
PreviousBlockId: &bc.Hash{V0: 0},
TransactionsRoot: &bc.Hash{V0: 6294987741126419124, V1: 12520373106916389157, V2: 5040806596198303681, V3: 1151748423853876189},
},
BlockHeader: &bc.BlockHeader{
Version: 1,
Height: 1,
- Timestamp: 1523352601,
+ Timestamp: 1523358600,
PreviousBlockId: &bc.Hash{V0: 0},
},
Transactions: []*bc.Tx{
BlockHeader: &bc.BlockHeader{
Version: 1,
Height: 1,
- Timestamp: 1523352601,
+ Timestamp: 1523358600,
PreviousBlockId: &bc.Hash{V0: 0},
TransactionsRoot: &bc.Hash{V0: 1},
},
}
txPool := protocol.NewTxPool(store, event.NewDispatcher())
- chain, err := protocol.NewChainWithOrphanManage(store, txPool, orphanManage)
+ chain, err := protocol.NewChainWithOrphanManage(store, txPool, orphanManage, nil)
if err != nil {
return err
}
store := database.NewStore(testDB)
dispatcher := event.NewDispatcher()
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
return chain, store, txPool, err
}
dispatcher := event.NewDispatcher()
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
if err != nil {
t.Fatal(err)
}
store := database.NewStore(testDB)
dispatcher := event.NewDispatcher()
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
if err != nil {
t.Fatal(err)
}
dispatcher := event.NewDispatcher()
txPool := protocol.NewTxPool(store, dispatcher)
- chain, err := protocol.NewChain(store, txPool)
+ chain, err := protocol.NewChain(store, txPool, dispatcher)
if err != nil {
t.Fatal(err)
}