From: mars Date: Tue, 8 Jan 2019 01:21:55 +0000 (+0800) Subject: add dpos consensus X-Git-Tag: v1.0.5~221^2~2^2~2 X-Git-Url: http://git.osdn.net/view?p=bytom%2Fvapor.git;a=commitdiff_plain;h=ddc7106558f020bde24cc337d51649611dddaba8 add dpos consensus --- diff --git a/account/dpos_builder.go b/account/dpos_builder.go new file mode 100644 index 00000000..299e5a00 --- /dev/null +++ b/account/dpos_builder.go @@ -0,0 +1,184 @@ +package account + +import ( + "context" + "encoding/json" + + "github.com/vapor/config" + + "github.com/vapor/blockchain/txbuilder" + "github.com/vapor/common" + "github.com/vapor/consensus" + "github.com/vapor/crypto/ed25519/chainkd" + "github.com/vapor/errors" + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" + "github.com/vapor/protocol/vm/vmutil" +) + +func (m *Manager) DecodeDposAction(data []byte) (txbuilder.Action, error) { + a := &DopsAction{Accounts: m} + err := json.Unmarshal(data, a) + return a, err +} + +type DopsAction struct { + Accounts *Manager + bc.AssetAmount + From string `json:"from"` + To string `json:"to"` + Fee uint64 `json:"fee"` + UseUnconfirmed bool `json:"use_unconfirmed"` +} + +func (a *DopsAction) Build(ctx context.Context, b *txbuilder.TemplateBuilder) error { + var missing []string + + if a.AssetId.IsZero() { + missing = append(missing, "asset_id") + } + if a.From == "" { + missing = append(missing, "from") + } + if a.To == "" { + missing = append(missing, "to") + } + + if len(missing) > 0 { + return txbuilder.MissingFieldsError(missing...) + } + res, err := a.Accounts.utxoKeeper.ReserveByAddress(a.From, a.AssetId, a.Amount, a.UseUnconfirmed, false) + if err != nil { + return errors.Wrap(err, "reserving utxos") + } + + // Cancel the reservation if the build gets rolled back. + b.OnRollback(func() { a.Accounts.utxoKeeper.Cancel(res.id) }) + for _, r := range res.utxos { + txInput, sigInst, err := DposTx(a.From, a.To, a.Amount, r) + if err != nil { + return errors.Wrap(err, "creating inputs") + } + if err = b.AddInput(txInput, sigInst); err != nil { + return errors.Wrap(err, "adding inputs") + } + } + + res, err = a.Accounts.utxoKeeper.ReserveByAddress(a.From, a.AssetId, a.Fee, a.UseUnconfirmed, true) + if err != nil { + return errors.Wrap(err, "reserving utxos") + } + + // Cancel the reservation if the build gets rolled back. 
+ b.OnRollback(func() { a.Accounts.utxoKeeper.Cancel(res.id) }) + for _, r := range res.utxos { + txSpendInput, sigInst, err := spendInput(r) + if err != nil { + return errors.Wrap(err, "creating inputs") + } + + if err = b.AddInput(txSpendInput, sigInst); err != nil { + return errors.Wrap(err, "adding inputs") + } + } + if res.change >= 0 { + address, err := common.DecodeAddress(a.From, &consensus.ActiveNetParams) + if err != nil { + return err + } + redeemContract := address.ScriptAddress() + program, err := vmutil.P2WPKHProgram(redeemContract) + if err != nil { + return err + } + if err = b.AddOutput(types.NewTxOutput(*consensus.BTMAssetID, res.change, program)); err != nil { + return errors.Wrap(err, "adding change output") + } + } + + return nil +} + +func (a *DopsAction) ActionType() string { + return "dpos" +} + +// DposInputs convert an utxo to the txinput +func DposTx(from, to string, stake uint64, u *UTXO) (*types.TxInput, *txbuilder.SigningInstruction, error) { + txInput := types.NewDpos(nil, from, to, u.SourceID, u.AssetID, stake, u.Amount, u.SourcePos, u.ControlProgram, types.Delegate) + sigInst := &txbuilder.SigningInstruction{} + var xpubs []chainkd.XPub + var xprv chainkd.XPrv + xprv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)) + xpubs = append(xpubs, xprv.XPub()) + quorum := len(xpubs) + if u.Address == "" { + sigInst.AddWitnessKeysWithOutPath(xpubs, quorum) + return txInput, sigInst, nil + } + + address, err := common.DecodeAddress(u.Address, &consensus.ActiveNetParams) + if err != nil { + return nil, nil, err + } + sigInst.AddRawWitnessKeysWithoutPath(xpubs, quorum) + switch address.(type) { + case *common.AddressWitnessPubKeyHash: + derivedPK := xpubs[0].PublicKey() + sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness([]byte(derivedPK))) + + case *common.AddressWitnessScriptHash: + derivedXPubs := xpubs + derivedPKs := chainkd.XPubKeys(derivedXPubs) + script, err := vmutil.P2SPMultiSigProgram(derivedPKs, quorum) + if err != nil { + return nil, nil, err + } + sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness(script)) + + default: + return nil, nil, errors.New("unsupport address type") + } + + return txInput, sigInst, nil +} + +// spendInput convert an utxo to the txinput +func spendInput(u *UTXO) (*types.TxInput, *txbuilder.SigningInstruction, error) { + txSpendInput := types.NewSpendInput(nil, u.SourceID, u.AssetID, u.Amount, u.SourcePos, u.ControlProgram) + sigInst := &txbuilder.SigningInstruction{} + var xpubs []chainkd.XPub + var xprv chainkd.XPrv + xprv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)) + xpubs = append(xpubs, xprv.XPub()) + quorum := len(xpubs) + if u.Address == "" { + sigInst.AddWitnessKeysWithOutPath(xpubs, quorum) + return txSpendInput, sigInst, nil + } + + address, err := common.DecodeAddress(u.Address, &consensus.ActiveNetParams) + if err != nil { + return nil, nil, err + } + sigInst.AddRawWitnessKeysWithoutPath(xpubs, quorum) + switch address.(type) { + case *common.AddressWitnessPubKeyHash: + derivedPK := xpubs[0].PublicKey() + sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness([]byte(derivedPK))) + + case *common.AddressWitnessScriptHash: + derivedXPubs := xpubs + derivedPKs := chainkd.XPubKeys(derivedXPubs) + script, err := vmutil.P2SPMultiSigProgram(derivedPKs, quorum) + if err != nil { + return nil, nil, err + } + sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness(script)) + + 
default: + return nil, nil, errors.New("unsupport address type") + } + + return txSpendInput, sigInst, nil +} diff --git a/account/utxo_keeper.go b/account/utxo_keeper.go index e9e486f2..d00d0f39 100644 --- a/account/utxo_keeper.go +++ b/account/utxo_keeper.go @@ -143,6 +143,39 @@ func (uk *utxoKeeper) Reserve(accountID string, assetID *bc.AssetID, amount uint return result, nil } +func (uk *utxoKeeper) ReserveByAddress(address string, assetID *bc.AssetID, amount uint64, useUnconfirmed bool, isReserved bool) (*reservation, error) { + uk.mtx.Lock() + defer uk.mtx.Unlock() + + utxos, immatureAmount := uk.findUtxosByAddress(address, assetID, useUnconfirmed) + optUtxos, optAmount, reservedAmount := uk.optUTXOs(utxos, amount) + if optAmount+reservedAmount+immatureAmount < amount { + return nil, ErrInsufficient + } + if optAmount+reservedAmount < amount { + return nil, ErrImmature + } + + if optAmount < amount { + return nil, ErrReserved + } + + result := &reservation{ + id: atomic.AddUint64(&uk.nextIndex, 1), + utxos: optUtxos, + change: optAmount - amount, + } + + uk.reservations[result.id] = result + if isReserved { + for _, u := range optUtxos { + uk.reserved[u.OutputID] = result.id + } + } + + return result, nil +} + func (uk *utxoKeeper) ReserveParticular(outHash bc.Hash, useUnconfirmed bool, exp time.Time) (*reservation, error) { uk.mtx.Lock() defer uk.mtx.Unlock() @@ -234,6 +267,41 @@ func (uk *utxoKeeper) findUtxos(accountID string, assetID *bc.AssetID, useUnconf return utxos, immatureAmount } +func (uk *utxoKeeper) findUtxosByAddress(address string, assetID *bc.AssetID, useUnconfirmed bool) ([]*UTXO, uint64) { + immatureAmount := uint64(0) + currentHeight := uk.currentHeight() + utxos := []*UTXO{} + appendUtxo := func(u *UTXO) { + if u.Address != address || u.AssetID != *assetID { + return + } + if u.ValidHeight > currentHeight { + immatureAmount += u.Amount + } else { + utxos = append(utxos, u) + } + } + + utxoIter := uk.db.IteratorPrefix([]byte(UTXOPreFix)) + defer utxoIter.Release() + for utxoIter.Next() { + u := &UTXO{} + if err := json.Unmarshal(utxoIter.Value(), u); err != nil { + log.WithField("err", err).Error("utxoKeeper findUtxos fail on unmarshal utxo") + continue + } + appendUtxo(u) + } + if !useUnconfirmed { + return utxos, immatureAmount + } + + for _, u := range uk.unconfirmed { + appendUtxo(u) + } + return utxos, immatureAmount +} + func (uk *utxoKeeper) findUtxo(outHash bc.Hash, useUnconfirmed bool) (*UTXO, error) { if u, ok := uk.unconfirmed[outHash]; useUnconfirmed && ok { return u, nil diff --git a/api/api.go b/api/api.go index b8cbcfbf..b45a30bf 100644 --- a/api/api.go +++ b/api/api.go @@ -17,7 +17,7 @@ import ( "github.com/vapor/dashboard/dashboard" "github.com/vapor/dashboard/equity" "github.com/vapor/errors" - "github.com/vapor/mining/cpuminer" + "github.com/vapor/mining/miner" "github.com/vapor/mining/miningpool" "github.com/vapor/net/http/authn" "github.com/vapor/net/http/gzip" @@ -105,14 +105,15 @@ func (wh *waitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { // API is the scheduling center for server type API struct { - sync *netsync.SyncManager - wallet *wallet.Wallet - accessTokens *accesstoken.CredentialStore - chain *protocol.Chain - server *http.Server - handler http.Handler - txFeedTracker *txfeed.Tracker - cpuMiner *cpuminer.CPUMiner + sync *netsync.SyncManager + wallet *wallet.Wallet + accessTokens *accesstoken.CredentialStore + chain *protocol.Chain + server *http.Server + handler http.Handler + txFeedTracker *txfeed.Tracker + 
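
A note on the account-side pieces above: the new "dpos_address" action (registered later in this diff in api/transact.go) is decoded by DecodeDposAction and built by DopsAction.Build, which calls the new ReserveByAddress twice, once for the staked amount and once, locked, for the fee. A minimal sketch of that path; the package, helper name, amounts and the "to" address are placeholders, not part of the commit:

package example // hypothetical placement

import (
	"context"

	"github.com/vapor/account"
	"github.com/vapor/blockchain/txbuilder"
)

// buildDposAction sketches feeding the JSON form of the "dpos_address"
// action through DecodeDposAction and Build. The asset is the all-0xff ID
// used in api/dpos_vote.go; addresses and amounts are placeholders.
func buildDposAction(ctx context.Context, mgr *account.Manager, b *txbuilder.TemplateBuilder) error {
	raw := []byte(`{
		"asset_id": "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
		"amount": 100000000,
		"from": "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep",
		"to": "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep",
		"fee": 100000
	}`)
	act, err := mgr.DecodeDposAction(raw)
	if err != nil {
		return err
	}
	// Build reserves UTXOs on "from" twice via ReserveByAddress (stake, then
	// fee with isReserved=true) and returns any change to "from".
	return act.Build(ctx, b)
}
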
//cpuMiner *cpuminer.CPUMiner + miner *miner.Miner miningPool *miningpool.MiningPool notificationMgr *websocket.WSNotificationManager newBlockCh chan *bc.Hash @@ -169,14 +170,14 @@ func (a *API) StartServer(address string) { } // NewAPI create and initialize the API -func NewAPI(sync *netsync.SyncManager, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, cpuMiner *cpuminer.CPUMiner, miningPool *miningpool.MiningPool, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, newBlockCh chan *bc.Hash, notificationMgr *websocket.WSNotificationManager) *API { +func NewAPI(sync *netsync.SyncManager, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, miner *miner.Miner, miningPool *miningpool.MiningPool, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, newBlockCh chan *bc.Hash, notificationMgr *websocket.WSNotificationManager) *API { api := &API{ sync: sync, wallet: wallet, chain: chain, accessTokens: token, txFeedTracker: txfeeds, - cpuMiner: cpuMiner, + miner: miner, miningPool: miningPool, newBlockCh: newBlockCh, @@ -255,6 +256,7 @@ func (a *API) buildHandler() { m.Handle("/get-side-raw-transaction", jsonHandler(a.getSideRawTransaction)) m.Handle("/build-mainchain-tx", jsonHandler(a.buildMainChainTxForContract)) m.Handle("/sign-with-key", jsonHandler(a.signWithKey)) + m.Handle("/dpos", jsonHandler(a.dpos)) } else { log.Warn("Please enable wallet") } diff --git a/api/cliam_transact.go b/api/cliam_transact.go index c19fce09..b6dc4494 100644 --- a/api/cliam_transact.go +++ b/api/cliam_transact.go @@ -147,12 +147,11 @@ func (a *API) createRawPegin(ctx context.Context, ins struct { // 用输出作为交易输入 生成新的交易 builder := txbuilder.NewBuilder(time.Now()) // TODO 根据raw tx生成一个utxo - //txInput := types.NewClaimInputInput(nil, *ins.RawTx.Outputs[nOut].AssetId, ins.RawTx.Outputs[nOut].Amount, cp.ControlProgram) sourceID := *ins.RawTx.OutputID(nOut) outputAccount := ins.RawTx.Outputs[nOut].Amount assetID := *ins.RawTx.Outputs[nOut].AssetId - txInput := types.NewClaimInputInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram) + txInput := types.NewClaimInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram) if err := builder.AddInput(txInput, &txbuilder.SigningInstruction{}); err != nil { return nil, err } @@ -318,13 +317,12 @@ func (a *API) createContractRawPegin(ctx context.Context, ins struct { // 用输出作为交易输入 生成新的交易 builder := txbuilder.NewBuilder(time.Now()) // TODO 根据raw tx生成一个utxo - //txInput := types.NewClaimInputInput(nil, *ins.RawTx.Outputs[nOut].AssetId, ins.RawTx.Outputs[nOut].Amount, cp.ControlProgram) sourceID := *ins.RawTx.OutputID(nOut) outputAccount := ins.RawTx.Outputs[nOut].Amount assetID := *ins.RawTx.Outputs[nOut].AssetId - txInput := types.NewClaimInputInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram) + txInput := types.NewClaimInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram) if err := builder.AddInput(txInput, &txbuilder.SigningInstruction{}); err != nil { return nil, err } diff --git a/api/dpos_vote.go b/api/dpos_vote.go new file mode 100644 index 00000000..e2379e10 --- /dev/null +++ b/api/dpos_vote.go @@ -0,0 +1,111 @@ +package api + +import ( + "context" + "time" + + log "github.com/sirupsen/logrus" + "github.com/vapor/account" + "github.com/vapor/blockchain/txbuilder" + "github.com/vapor/config" + "github.com/vapor/crypto/ed25519/chainkd" + "github.com/vapor/errors" + "github.com/vapor/protocol/bc" +) + +func (a *API) dpos(ctx context.Context, 
ins struct { + To string `json:"to"` + Fee uint64 `json:"fee"` + Stake uint64 `json:"stake"` + TxType uint8 `json:"tx_type"` +}) Response { + // 找到utxo + var assetID bc.AssetID + assetID.UnmarshalText([]byte("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) + // 生成dpos交易 + dpos := account.DopsAction{ + Accounts: a.wallet.AccountMgr, + From: config.CommonConfig.Consensus.Dpos.Coinbase, + To: ins.To, + Fee: ins.Fee, + } + dpos.Amount = ins.Stake + dpos.AssetId = &assetID + builder := txbuilder.NewBuilder(time.Now()) + if err := dpos.Build(ctx, builder); err != nil { + return NewErrorResponse(err) + } + + // 签名 + tmpl, _, err := builder.Build() + if err != nil { + return NewErrorResponse(err) + } + var xprv chainkd.XPrv + xprv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)) + if err := signWithKey(tmpl, xprv); err != nil { + return NewErrorResponse(err) + } + log.Info("Sign Transaction complete.") + log.Info(txbuilder.SignProgress(tmpl)) + //return NewSuccessResponse(&signTemplateResp{Tx: tmpl, SignComplete: txbuilder.SignProgress(tmpl)}) + // 提交 + + if err := txbuilder.FinalizeTx(ctx, a.chain, tmpl.Transaction); err != nil { + return NewErrorResponse(err) + } + + log.WithField("tx_id", tmpl.Transaction.ID.String()).Info("submit single tx") + return NewSuccessResponse(&submitTxResp{TxID: &tmpl.Transaction.ID}) + + //return NewSuccessResponse(nil) +} + +func signWithKey(tmpl *txbuilder.Template, xprv chainkd.XPrv) error { + for i, sigInst := range tmpl.SigningInstructions { + for j, wc := range sigInst.WitnessComponents { + switch sw := wc.(type) { + case *txbuilder.SignatureWitness: + err := sw.Sign(tmpl, uint32(i), xprv) + if err != nil { + return errors.WithDetailf(err, "adding signature(s) to signature witness component %d of input %d", j, i) + } + case *txbuilder.RawTxSigWitness: + err := sw.Sign(tmpl, uint32(i), xprv) + if err != nil { + return errors.WithDetailf(err, "adding signature(s) to raw-signature witness component %d of input %d", j, i) + } + } + } + } + return materializeWitnessesWithKey(tmpl) +} + +func materializeWitnessesWithKey(txTemplate *txbuilder.Template) error { + msg := txTemplate.Transaction + + if msg == nil { + return errors.Wrap(txbuilder.ErrMissingRawTx) + } + + if len(txTemplate.SigningInstructions) > len(msg.Inputs) { + return errors.Wrap(txbuilder.ErrBadInstructionCount) + } + + for i, sigInst := range txTemplate.SigningInstructions { + if msg.Inputs[sigInst.Position] == nil { + return errors.WithDetailf(txbuilder.ErrBadTxInputIdx, "signing instruction %d references missing tx input %d", i, sigInst.Position) + } + + var witness [][]byte + for j, wc := range sigInst.WitnessComponents { + err := wc.Materialize(&witness) + if err != nil { + return errors.WithDetailf(err, "error in witness component %d of input %d", j, i) + } + } + msg.SetInputArguments(sigInst.Position, witness) + } + + return nil +} diff --git a/api/hsm.go b/api/hsm.go index 60d96e37..96b968e5 100644 --- a/api/hsm.go +++ b/api/hsm.go @@ -6,6 +6,9 @@ import ( log "github.com/sirupsen/logrus" "github.com/vapor/blockchain/txbuilder" + "github.com/vapor/common" + "github.com/vapor/consensus" + "github.com/vapor/crypto" "github.com/vapor/crypto/ed25519/chainkd" ) @@ -142,8 +145,9 @@ func (a *API) pseudohsmCheckPassword(ctx context.Context, ins struct { } type keyPair struct { - Xpub chainkd.XPub `json:"xpub"` - Xprv chainkd.XPrv `json:"xprv"` + Xpub chainkd.XPub `json:"xpub"` + Xprv chainkd.XPrv `json:"xprv"` + Address string `json:"address,omitempty"` } func (a 
*API) createXKeys(ctx context.Context) Response { @@ -151,5 +155,13 @@ func (a *API) createXKeys(ctx context.Context) Response { if err != nil { return NewErrorResponse(err) } - return NewSuccessResponse(&keyPair{Xprv: xprv, Xpub: xpub}) + + pubHash := crypto.Ripemd160(xpub.PublicKey()) + + address, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams) + if err != nil { + return NewErrorResponse(err) + } + + return NewSuccessResponse(&keyPair{Xprv: xprv, Xpub: xpub, Address: address.EncodeAddress()}) } diff --git a/api/miner.go b/api/miner.go index 671084d7..3ba3f066 100644 --- a/api/miner.go +++ b/api/miner.go @@ -198,7 +198,8 @@ func (a *API) setMining(in struct { } func (a *API) startMining() Response { - a.cpuMiner.Start() + //a.cpuMiner.Start() + a.miner.Start() if !a.IsMining() { return NewErrorResponse(errors.New("Failed to start mining")) } @@ -206,7 +207,8 @@ func (a *API) startMining() Response { } func (a *API) stopMining() Response { - a.cpuMiner.Stop() + //a.cpuMiner.Stop() + a.miner.Stop() if a.IsMining() { return NewErrorResponse(errors.New("Failed to stop mining")) } diff --git a/api/nodeinfo.go b/api/nodeinfo.go index c0b55c1e..c4125a1e 100644 --- a/api/nodeinfo.go +++ b/api/nodeinfo.go @@ -33,7 +33,7 @@ func (a *API) GetNodeInfo() *NetInfo { info := &NetInfo{ Listening: a.sync.Switch().IsListening(), Syncing: !a.sync.IsCaughtUp(), - Mining: a.cpuMiner.IsMining(), + Mining: a.miner.IsMining(), PeerCount: len(a.sync.Switch().Peers().List()), CurrentBlock: a.chain.BestBlockHeight(), NetWorkID: a.sync.NodeInfo().Network, @@ -101,7 +101,7 @@ func (a *API) isMining() Response { // IsMining return mining status func (a *API) IsMining() bool { - return a.cpuMiner.IsMining() + return a.miner.IsMining() } // return the peers of current node diff --git a/api/transact.go b/api/transact.go index 1c0f2741..1f34cb17 100644 --- a/api/transact.go +++ b/api/transact.go @@ -34,6 +34,7 @@ func (a *API) actionDecoder(action string) (func([]byte) (txbuilder.Action, erro "retire": txbuilder.DecodeRetireAction, "spend_account": a.wallet.AccountMgr.DecodeSpendAction, "spend_account_unspent_output": a.wallet.AccountMgr.DecodeSpendUTXOAction, + "dpos_address": a.wallet.AccountMgr.DecodeDposAction, } decoder, ok := decoders[action] return decoder, ok @@ -46,7 +47,9 @@ func onlyHaveInputActions(req *BuildRequest) (bool, error) { if !ok { return false, errors.WithDetailf(ErrBadActionType, "no action type provided on action %d", i) } - + if strings.HasPrefix(actionType, "dpos_address") { + return false, nil + } if strings.HasPrefix(actionType, "spend") || actionType == "issue" { count++ } diff --git a/blockchain/txbuilder/data_witness.go b/blockchain/txbuilder/data_witness.go index 1f49b2ca..333bdb59 100644 --- a/blockchain/txbuilder/data_witness.go +++ b/blockchain/txbuilder/data_witness.go @@ -9,7 +9,7 @@ import ( // DataWitness used sign transaction type DataWitness chainjson.HexBytes -func (dw DataWitness) materialize(args *[][]byte) error { +func (dw DataWitness) Materialize(args *[][]byte) error { *args = append(*args, dw) return nil } diff --git a/blockchain/txbuilder/finalize.go b/blockchain/txbuilder/finalize.go index daa6308b..1cafe4f0 100644 --- a/blockchain/txbuilder/finalize.go +++ b/blockchain/txbuilder/finalize.go @@ -88,6 +88,8 @@ func checkTxSighashCommitment(tx *types.Tx) error { args = t.Arguments case *types.ClaimInput: args = t.Arguments + case *types.DposTx: + args = t.Arguments } // Note: These numbers will need to change if more args are added such that 
the minimum length changes switch { @@ -131,7 +133,7 @@ func CalculateTxFee(tx *types.Tx) (fee uint64) { totalOutputBTM := uint64(0) for _, input := range tx.Inputs { - if input.InputType() != types.CoinbaseInputType && input.AssetID() == *consensus.BTMAssetID { + if input.InputType() != types.CoinbaseInputType && input.InputType() != types.DposInputType && input.AssetID() == *consensus.BTMAssetID { totalInputBTM += input.Amount() } } diff --git a/blockchain/txbuilder/rawtxsig_witness.go b/blockchain/txbuilder/rawtxsig_witness.go index 53d3585f..85caa944 100644 --- a/blockchain/txbuilder/rawtxsig_witness.go +++ b/blockchain/txbuilder/rawtxsig_witness.go @@ -6,6 +6,7 @@ import ( log "github.com/sirupsen/logrus" + "github.com/vapor/crypto/ed25519/chainkd" chainjson "github.com/vapor/encoding/json" ) @@ -53,7 +54,35 @@ func (sw *RawTxSigWitness) sign(ctx context.Context, tpl *Template, index uint32 return nil } -func (sw RawTxSigWitness) materialize(args *[][]byte) error { +func (sw *RawTxSigWitness) Sign(tpl *Template, index uint32, xprv chainkd.XPrv) error { + if len(sw.Sigs) < len(sw.Keys) { + // Each key in sw.Keys may produce a signature in sw.Sigs. Make + // sure there are enough slots in sw.Sigs and that we preserve any + // sigs already present. + newSigs := make([]chainjson.HexBytes, len(sw.Keys)) + copy(newSigs, sw.Sigs) + sw.Sigs = newSigs + } + for i, keyID := range sw.Keys { + if len(sw.Sigs[i]) > 0 { + // Already have a signature for this key + continue + } + if keyID.XPub.String() != xprv.XPub().String() { + continue + } + data := tpl.Hash(index).Byte32() + sigBytes := xprv.Sign(data[:]) + // This break is ordered to avoid signing transaction successfully only once for a multiple-sign account + // that consist of different keys by the same password. Exit immediately when the signature is success, + // it means that only one signature will be successful in the loop for this multiple-sign account. + sw.Sigs[i] = sigBytes + break + } + return nil +} + +func (sw RawTxSigWitness) Materialize(args *[][]byte) error { var nsigs int for i := 0; i < len(sw.Sigs) && nsigs < sw.Quorum; i++ { if len(sw.Sigs[i]) > 0 { diff --git a/blockchain/txbuilder/signature_witness.go b/blockchain/txbuilder/signature_witness.go index ff6674af..576d0958 100644 --- a/blockchain/txbuilder/signature_witness.go +++ b/blockchain/txbuilder/signature_witness.go @@ -99,7 +99,56 @@ func (sw *SignatureWitness) sign(ctx context.Context, tpl *Template, index uint3 return nil } -func (sw SignatureWitness) materialize(args *[][]byte) error { +func (sw *SignatureWitness) Sign(tpl *Template, index uint32, xprv chainkd.XPrv) error { + // Compute the predicate to sign. This is either a + // txsighash program if tpl.AllowAdditional is false (i.e., the tx is complete + // and no further changes are allowed) or a program enforcing + // constraints derived from the existing outputs and current input. + if len(sw.Program) == 0 { + var err error + sw.Program, err = buildSigProgram(tpl, tpl.SigningInstructions[index].Position) + if err != nil { + return err + } + if len(sw.Program) == 0 { + return ErrEmptyProgram + } + } + if len(sw.Sigs) < len(sw.Keys) { + // Each key in sw.Keys may produce a signature in sw.Sigs. Make + // sure there are enough slots in sw.Sigs and that we preserve any + // sigs already present. 
+ newSigs := make([]chainjson.HexBytes, len(sw.Keys)) + copy(newSigs, sw.Sigs) + sw.Sigs = newSigs + } + var h [32]byte + sha3pool.Sum256(h[:], sw.Program) + for i, keyID := range sw.Keys { + if len(sw.Sigs[i]) > 0 { + // Already have a signature for this key + continue + } + path := make([][]byte, len(keyID.DerivationPath)) + for i, p := range keyID.DerivationPath { + path[i] = p + } + if keyID.XPub.String() != xprv.XPub().String() { + continue + } + + sigBytes := xprv.Sign(h[:]) + + // This break is ordered to avoid signing transaction successfully only once for a multiple-sign account + // that consist of different keys by the same password. Exit immediately when the signature is success, + // it means that only one signature will be successful in the loop for this multiple-sign account. + sw.Sigs[i] = sigBytes + break + } + return nil +} + +func (sw SignatureWitness) Materialize(args *[][]byte) error { // This is the value of N for the CHECKPREDICATE call. The code // assumes that everything already in the arg list before this call // to Materialize is input to the signature program, so N is diff --git a/blockchain/txbuilder/signing_instruction.go b/blockchain/txbuilder/signing_instruction.go index e8786e2f..c0f8a2d1 100644 --- a/blockchain/txbuilder/signing_instruction.go +++ b/blockchain/txbuilder/signing_instruction.go @@ -29,6 +29,20 @@ func (si *SigningInstruction) AddWitnessKeys(xpubs []chainkd.XPub, path [][]byte si.WitnessComponents = append(si.WitnessComponents, sw) } +func (si *SigningInstruction) AddWitnessKeysWithOutPath(xpubs []chainkd.XPub, quorum int) { + + keyIDs := make([]keyID, 0, len(xpubs)) + for _, xpub := range xpubs { + keyIDs = append(keyIDs, keyID{XPub: xpub}) + } + + sw := &SignatureWitness{ + Quorum: quorum, + Keys: keyIDs, + } + si.WitnessComponents = append(si.WitnessComponents, sw) +} + // AddRawWitnessKeys adds a SignatureWitness with the given quorum and // list of keys derived by applying the derivation path to each of the // xpubs. @@ -50,6 +64,20 @@ func (si *SigningInstruction) AddRawWitnessKeys(xpubs []chainkd.XPub, path [][]b si.WitnessComponents = append(si.WitnessComponents, sw) } +func (si *SigningInstruction) AddRawWitnessKeysWithoutPath(xpubs []chainkd.XPub, quorum int) { + + keyIDs := make([]keyID, 0, len(xpubs)) + for _, xpub := range xpubs { + keyIDs = append(keyIDs, keyID{XPub: xpub}) + } + + sw := &RawTxSigWitness{ + Quorum: quorum, + Keys: keyIDs, + } + si.WitnessComponents = append(si.WitnessComponents, sw) +} + // SigningInstruction gives directions for signing inputs in a TxTemplate. type SigningInstruction struct { Position uint32 `json:"position"` @@ -61,7 +89,7 @@ type SigningInstruction struct { // arguments for a VM program via its materialize method. Concrete // witnessComponent types include SignatureWitness and dataWitness. 
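
The path-less helpers AddWitnessKeysWithOutPath and AddRawWitnessKeysWithoutPath added above mirror how account/dpos_builder.go signs with the single consensus key from the config. A sketch assuming a one-key quorum; the helper and package names are illustrative:

package example // hypothetical placement

import (
	"github.com/vapor/blockchain/txbuilder"
	"github.com/vapor/crypto/ed25519/chainkd"
)

// pathlessSigInst sketches the signing setup used by DposTx/spendInput above:
// one xpub derived from the configured xprv, quorum 1, no derivation path.
func pathlessSigInst(xprvText string, noAddress bool) (*txbuilder.SigningInstruction, error) {
	var xprv chainkd.XPrv
	if err := xprv.UnmarshalText([]byte(xprvText)); err != nil {
		return nil, err
	}
	xpubs := []chainkd.XPub{xprv.XPub()}
	sigInst := &txbuilder.SigningInstruction{}
	if noAddress {
		// UTXO with no recorded address: plain SignatureWitness keyed by the xpub.
		sigInst.AddWitnessKeysWithOutPath(xpubs, len(xpubs))
	} else {
		// Known address: RawTxSigWitness; the caller appends the DataWitness
		// (public key or multisig program) that matches the address type.
		sigInst.AddRawWitnessKeysWithoutPath(xpubs, len(xpubs))
	}
	return sigInst, nil
}
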
type witnessComponent interface { - materialize(*[][]byte) error + Materialize(*[][]byte) error } // UnmarshalJSON unmarshal SigningInstruction diff --git a/blockchain/txbuilder/witness.go b/blockchain/txbuilder/witness.go index 582b06ff..7447ba8b 100644 --- a/blockchain/txbuilder/witness.go +++ b/blockchain/txbuilder/witness.go @@ -33,7 +33,7 @@ func materializeWitnesses(txTemplate *Template) error { var witness [][]byte for j, wc := range sigInst.WitnessComponents { - err := wc.materialize(&witness) + err := wc.Materialize(&witness) if err != nil { return errors.WithDetailf(err, "error in witness component %d of input %d", j, i) } diff --git a/chain/chain.go b/chain/chain.go new file mode 100644 index 00000000..7279cf69 --- /dev/null +++ b/chain/chain.go @@ -0,0 +1,22 @@ +package chain + +import ( + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" +) + +// Chain is the interface for Bytom core +type Chain interface { + BestBlockHeader() *types.BlockHeader + BestBlockHeight() uint64 + CalcNextSeed(*bc.Hash) (*bc.Hash, error) + GetBlockByHash(*bc.Hash) (*types.Block, error) + GetBlockByHeight(uint64) (*types.Block, error) + GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error) + GetHeaderByHeight(uint64) (*types.BlockHeader, error) + GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error) + InMainChain(bc.Hash) bool + ProcessBlock(*types.Block) (bool, error) + ValidateTx(*types.Tx) (bool, error) + GetAuthoritys(string) string +} diff --git a/cmd/vapor/commands/run_node.go b/cmd/vapor/commands/run_node.go index 98e608ff..fdf7172c 100644 --- a/cmd/vapor/commands/run_node.go +++ b/cmd/vapor/commands/run_node.go @@ -67,6 +67,8 @@ func init() { runNodeCmd.Flags().String("signer", config.Signer, "The signer corresponds to xpub of signblock") runNodeCmd.Flags().String("side.sign_block_xpubs", config.Side.SignBlockXPubs, "Change federated peg to use a different xpub.") + runNodeCmd.Flags().String("consensus_config_file", config.ConsensusConfigFile, "consensus configuration file") + RootCmd.AddCommand(runNodeCmd) } diff --git a/cmd/vapor/consensus.json b/cmd/vapor/consensus.json new file mode 100644 index 00000000..b6585cc4 --- /dev/null +++ b/cmd/vapor/consensus.json @@ -0,0 +1,16 @@ +{ + "consensus":{ + "dpos": { + "period": 1, + "epoch": 300, + "maxSignersCount": 1, + "minVoterBalance": 0, + "genesisTimestamp": 1524549600, + "coinbase": "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep", + "xprv": "a8e281b615809046698fb0b0f2804a36d824d48fa443350f10f1b80649d39e5f1e85cf9855548915e36137345910606cbc8e7dd8497c831dce899ee6ac112445", + "signers": [ + "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep" + ] + } + } +} \ No newline at end of file diff --git a/config/config.go b/config/config.go index 616704b0..13d86293 100644 --- a/config/config.go +++ b/config/config.go @@ -1,7 +1,6 @@ package config import ( - "math/big" "os" "os/user" "path/filepath" @@ -27,6 +26,7 @@ type Config struct { Side *SideChainConfig `mapstructure:"side"` MainChain *MainChainRpcConfig `mapstructure:"mainchain"` Websocket *WebsocketConfig `mapstructure:"ws"` + Consensus *ConsensusConfig `mapstructure:"consensus"` } // Default configurable parameters. 
@@ -40,6 +40,7 @@ func DefaultConfig() *Config { Side: DefaultSideChainConfig(), MainChain: DefaultMainChainRpc(), Websocket: DefaultWebsocketConfig(), + Consensus: DefaultConsensusCOnfig(), } } @@ -86,9 +87,11 @@ type BaseConfig struct { // log file name LogFile string `mapstructure:"log_file"` - //Validate pegin proof by checking bytom transaction inclusion in mainchain. + // Validate pegin proof by checking bytom transaction inclusion in mainchain. ValidatePegin bool `mapstructure:"validate_pegin"` Signer string `mapstructure:"signer"` + + ConsensusConfigFile string `mapstructure:"consensus_config_file"` } // Default configurable base parameters. @@ -171,13 +174,20 @@ type WebsocketConfig struct { MaxNumConcurrentReqs int `mapstructure:"max_num_concurrent_reqs"` } +type ConsensusConfig struct { + Dpos *DposConfig `mapstructure:"dpos"` +} + type DposConfig struct { - Period uint64 `json:"period"` // Number of seconds between blocks to enforce - Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint - MaxSignerCount uint64 `json:"max_signers_count"` // Max count of signers - MinVoterBalance *big.Int `json:"min_boter_balance"` // Min voter balance to valid this vote - GenesisTimestamp uint64 `json:"genesis_timestamp"` // The LoopStartTime of first Block - SelfVoteSigners []common.Address `json:"signers"` // Signers vote by themselves to seal the block, make sure the signer accounts are pre-funded + Period uint64 `json:"period"` // Number of seconds between blocks to enforce + Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint + MaxSignerCount uint64 `json:"max_signers_count"` // Max count of signers + MinVoterBalance uint64 `json:"min_boter_balance"` // Min voter balance to valid this vote + GenesisTimestamp uint64 `json:"genesis_timestamp"` // The LoopStartTime of first Block + Coinbase string `json:"coinbase"` + XPrv string `json:"xprv"` + SelfVoteSigners []string `json:"signers"` // Signers vote by themselves to seal the block, make sure the signer accounts are pre-funded + Signers []common.Address } // Default configurable rpc's auth parameters. 
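
For reference, the sample cmd/vapor/consensus.json shown earlier lines up with the new DposConfig fields as follows; this is only an illustration of the field correspondence, since the loader behind the consensus_config_file flag is not part of this excerpt:

package example // hypothetical placement

import "github.com/vapor/config"

// sampleConsensus restates cmd/vapor/consensus.json in struct form.
var sampleConsensus = &config.ConsensusConfig{
	Dpos: &config.DposConfig{
		Period:           1,
		Epoch:            300,
		MaxSignerCount:   1,
		MinVoterBalance:  0,
		GenesisTimestamp: 1524549600,
		Coinbase:         "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep",
		XPrv:             "a8e281b615809046698fb0b0f2804a36d824d48fa443350f10f1b80649d39e5f1e85cf9855548915e36137345910606cbc8e7dd8497c831dce899ee6ac112445",
		SelfVoteSigners:  []string{"vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep"},
	},
}
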
@@ -225,6 +235,20 @@ func DefaultWebsocketConfig() *WebsocketConfig { } } +func DefaultDposConfig() *DposConfig { + return &DposConfig{ + Period: 1, + Epoch: 300, + MaxSignerCount: 1, + MinVoterBalance: 0, + GenesisTimestamp: 1524549600, + } +} + +func DefaultConsensusCOnfig() *ConsensusConfig { + return &ConsensusConfig{Dpos: DefaultDposConfig()} +} + //----------------------------------------------------------------------------- // Utils diff --git a/config/genesis.go b/config/genesis.go index 81907344..1443862f 100644 --- a/config/genesis.go +++ b/config/genesis.go @@ -1,13 +1,13 @@ package config import ( + "bytes" "crypto/sha256" "encoding/hex" log "github.com/sirupsen/logrus" "github.com/vapor/consensus" - "github.com/vapor/crypto" "github.com/vapor/crypto/ed25519" "github.com/vapor/crypto/ed25519/chainkd" "github.com/vapor/protocol/bc" @@ -17,20 +17,20 @@ import ( func commitToArguments() (res *[32]byte) { var fedpegPubkeys []ed25519.PublicKey - var signBlockPubkeys []ed25519.PublicKey for _, xpub := range consensus.ActiveNetParams.FedpegXPubs { fedpegPubkeys = append(fedpegPubkeys, xpub.PublicKey()) } fedpegScript, _ := vmutil.P2SPMultiSigProgram(fedpegPubkeys, len(fedpegPubkeys)) - for _, xpub := range consensus.ActiveNetParams.SignBlockXPubs { - signBlockPubkeys = append(signBlockPubkeys, xpub.PublicKey()) + var buffer bytes.Buffer + for _, address := range CommonConfig.Consensus.Dpos.Signers { + redeemContract := address.ScriptAddress() + buffer.Write(redeemContract) } - signBlockScript, _ := vmutil.P2SPMultiSigProgram(signBlockPubkeys, len(signBlockPubkeys)) hasher := sha256.New() hasher.Write(fedpegScript) - hasher.Write(signBlockScript) + hasher.Write(buffer.Bytes()) resSlice := hasher.Sum(nil) res = new([32]byte) copy(res[:], resSlice) @@ -56,6 +56,7 @@ func genesisTx() *types.Tx { types.NewTxOutput(*consensus.BTMAssetID, consensus.InitialBlockSubsidy, contract), }, } + return types.NewTx(txData) } @@ -75,6 +76,13 @@ func mainNetGenesisBlock() *types.Block { log.Panicf("fail on calc genesis tx merkel root") } + var xPrv chainkd.XPrv + if CommonConfig.Consensus.Dpos.XPrv == "" { + log.Panicf("Signer is empty") + } + xPrv.UnmarshalText([]byte(CommonConfig.Consensus.Dpos.XPrv)) + b, _ := xPrv.XPub().MarshalText() + block := &types.Block{ BlockHeader: types.BlockHeader{ Version: 1, @@ -84,24 +92,10 @@ func mainNetGenesisBlock() *types.Block { TransactionsMerkleRoot: merkleRoot, TransactionStatusHash: txStatusHash, }, + Coinbase: b, }, Transactions: []*types.Tx{tx}, } - - var xPrv chainkd.XPrv - if consensus.ActiveNetParams.Signer == "" { - return block - } - copy(xPrv[:], []byte(consensus.ActiveNetParams.Signer)) - msg, _ := block.MarshalText() - sign := xPrv.Sign(msg) - pubHash := crypto.Ripemd160(xPrv.XPub().PublicKey()) - control, err := vmutil.P2WPKHProgram([]byte(pubHash)) - if err != nil { - log.Panicf(err.Error()) - } - block.Proof.Sign = sign - block.Proof.ControlProgram = control return block } @@ -121,6 +115,13 @@ func testNetGenesisBlock() *types.Block { log.Panicf("fail on calc genesis tx merkel root") } + var xPrv chainkd.XPrv + if CommonConfig.Consensus.Dpos.XPrv == "" { + log.Panicf("Signer is empty") + } + xPrv.UnmarshalText([]byte(CommonConfig.Consensus.Dpos.XPrv)) + b, _ := xPrv.XPub().MarshalText() + block := &types.Block{ BlockHeader: types.BlockHeader{ Version: 1, @@ -130,6 +131,7 @@ func testNetGenesisBlock() *types.Block { TransactionsMerkleRoot: merkleRoot, TransactionStatusHash: txStatusHash, }, + Coinbase: b, }, Transactions: []*types.Tx{tx}, } @@ 
-152,6 +154,13 @@ func soloNetGenesisBlock() *types.Block { log.Panicf("fail on calc genesis tx merkel root") } + var xPrv chainkd.XPrv + if CommonConfig.Consensus.Dpos.XPrv == "" { + log.Panicf("Signer is empty") + } + xPrv.UnmarshalText([]byte(CommonConfig.Consensus.Dpos.XPrv)) + b, _ := xPrv.XPub().MarshalText() + block := &types.Block{ BlockHeader: types.BlockHeader{ Version: 1, @@ -161,6 +170,7 @@ func soloNetGenesisBlock() *types.Block { TransactionsMerkleRoot: merkleRoot, TransactionStatusHash: txStatusHash, }, + Coinbase: b, }, Transactions: []*types.Tx{tx}, } diff --git a/consensus/consensus/consensus.go b/consensus/consensus/consensus.go new file mode 100644 index 00000000..d5eb7d75 --- /dev/null +++ b/consensus/consensus/consensus.go @@ -0,0 +1,44 @@ +package consensus + +import ( + "github.com/vapor/chain" + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" +) + +// Engine is an algorithm agnostic consensus engine. +type Engine interface { + // Author retrieves the Ethereum address of the account that minted the given + // block, which may be different from the header's coinbase if a consensus + // engine is based on signatures. + Author(header *types.BlockHeader, c chain.Chain) (string, error) + + // VerifyHeader checks whether a header conforms to the consensus rules of a + // given engine. Verifying the seal may be done optionally here, or explicitly + // via the VerifySeal method. + VerifyHeader(c chain.Chain, header *types.BlockHeader, seal bool) error + + // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers + // concurrently. The method returns a quit channel to abort the operations and + // a results channel to retrieve the async verifications (the order is that of + // the input slice). + VerifyHeaders(c chain.Chain, headers []*types.BlockHeader, seals []bool) (chan<- struct{}, <-chan error) + + // VerifySeal checks whether the crypto seal on a header is valid according to + // the consensus rules of the given engine. + VerifySeal(c chain.Chain, header *types.BlockHeader) error + + // Prepare initializes the consensus fields of a block header according to the + // rules of a particular engine. The changes are executed inline. + Prepare(c chain.Chain, header *types.BlockHeader) error + + // Finalize runs any post-transaction state modifications (e.g. block rewards) + // and assembles the final block. + // Note: The block header and state database might be updated to reflect any + // consensus rules that happen at finalization (e.g. block rewards). + Finalize(c chain.Chain, header *types.BlockHeader, txs []*bc.Tx) error + + // Seal generates a new block for the given input block with the local miner's + // seal place on top. 
+ Seal(c chain.Chain, block *types.Block) (*types.Block, error) +} diff --git a/consensus/consensus/dpos/custom_tx.go b/consensus/consensus/dpos/custom_tx.go new file mode 100644 index 00000000..8cc8b5a9 --- /dev/null +++ b/consensus/consensus/dpos/custom_tx.go @@ -0,0 +1,350 @@ +package dpos + +import ( + "encoding/json" + "strconv" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/vapor/chain" + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" +) + +const ( + /* + * vapor:version:category:action/data + */ + vaporPrefix = "vapor" + vaporVersion = "1" + vaporCategoryEvent = "event" + vaporCategoryLog = "oplog" + vaporCategorySC = "sc" + vaporEventVote = "vote" + vaporEventConfirm = "confirm" + vaporEventPorposal = "proposal" + vaporEventDeclare = "declare" + + vaporMinSplitLen = 3 + posPrefix = 0 + posVersion = 1 + posCategory = 2 + posEventVote = 3 + posEventConfirm = 3 + posEventProposal = 3 + posEventDeclare = 3 + posEventConfirmNumber = 4 + + /* + * proposal type + */ + proposalTypeCandidateAdd = 1 + proposalTypeCandidateRemove = 2 + proposalTypeMinerRewardDistributionModify = 3 // count in one thousand + + /* + * proposal related + */ + maxValidationLoopCnt = 123500 // About one month if seal each block per second & 21 super nodes + minValidationLoopCnt = 12350 // About three days if seal each block per second & 21 super nodes + defaultValidationLoopCnt = 30875 // About one week if seal each block per second & 21 super nodes +) + +// Vote : +// vote come from custom tx which data like "vapor:1:event:vote" +// Sender of tx is Voter, the tx.to is Candidate +// Stake is the balance of Voter when create this vote +type Vote struct { + Voter string `json:"Voter"` + Candidate string `json:"Candidate"` + Stake uint64 `json:"Stake"` +} + +// Confirmation : +// confirmation come from custom tx which data like "vapor:1:event:confirm:123" +// 123 is the block number be confirmed +// Sender of tx is Signer only if the signer in the SignerQueue for block number 123 +type Confirmation struct { + Signer string `json:"signer"` + BlockNumber uint64 `json:"block_number"` +} + +// Proposal : +// proposal come from custom tx which data like "vapor:1:event:proposal:candidate:add:address" or "vapor:1:event:proposal:percentage:60" +// proposal only come from the current candidates +// not only candidate add/remove , current signer can proposal for params modify like percentage of reward distribution ... +type Proposal struct { + Hash bc.Hash `json:"hash"` // tx hash + ValidationLoopCnt uint64 `json:"ValidationLoopCnt"` // validation block number length of this proposal from the received block number + ImplementNumber uint64 `json:"ImplementNumber"` // block number to implement modification in this proposal + ProposalType uint64 `json:"ProposalType"` // type of proposal 1 - add candidate 2 - remove candidate ... 
+ Proposer string `json:"Proposer"` // + Candidate string `json:"Candidate"` + MinerRewardPerThousand uint64 `json:"MinerRewardPerThousand"` + Declares []*Declare `json:"Declares"` // Declare this proposal received + ReceivedNumber uint64 `json:"ReceivedNumber"` // block number of proposal received +} + +func (p *Proposal) copy() *Proposal { + cpy := &Proposal{ + Hash: p.Hash, + ValidationLoopCnt: p.ValidationLoopCnt, + ImplementNumber: p.ImplementNumber, + ProposalType: p.ProposalType, + Proposer: p.Proposer, + Candidate: p.Candidate, + MinerRewardPerThousand: p.MinerRewardPerThousand, + Declares: make([]*Declare, len(p.Declares)), + ReceivedNumber: p.ReceivedNumber, + } + + copy(cpy.Declares, p.Declares) + return cpy +} + +// Declare : +// declare come from custom tx which data like "vapor:1:event:declare:hash:yes" +// proposal only come from the current candidates +// hash is the hash of proposal tx +type Declare struct { + ProposalHash bc.Hash `json:"ProposalHash"` + Declarer string `json:"Declarer"` + Decision bool `json:"Decision"` +} + +// HeaderExtra is the struct of info in header.Extra[extraVanity:len(header.extra)-extraSeal] +type HeaderExtra struct { + CurrentBlockConfirmations []Confirmation `json:"current_block_confirmations"` + CurrentBlockVotes []Vote `json:"CurrentBlockVotes"` + CurrentBlockProposals []Proposal `json:"CurrentBlockProposals"` + CurrentBlockDeclares []Declare `json:"CurrentBlockDeclares"` + ModifyPredecessorVotes []Vote `json:"ModifyPredecessorVotes"` + LoopStartTime uint64 `json:"LoopStartTime"` + SignerQueue []string `json:"SignerQueue"` + SignerMissing []string `json:"SignerMissing"` + ConfirmedBlockNumber uint64 `json:"ConfirmedBlockNumber"` +} + +// Calculate Votes from transaction in this block, write into header.Extra +func (d *Dpos) processCustomTx(headerExtra HeaderExtra, c chain.Chain, header *types.BlockHeader, txs []*bc.Tx) (HeaderExtra, error) { + + var ( + snap *Snapshot + err error + height uint64 + ) + height = header.Height + if height > 1 { + snap, err = d.snapshot(c, height-1, header.PreviousBlockHash, nil, nil, defaultLoopCntRecalculateSigners) + if err != nil { + return headerExtra, err + } + } + + for _, tx := range txs { + var ( + from string + to string + ) + dpos := new(bc.Dpos) + stake := uint64(0) + for _, value := range tx.Entries { + switch d := value.(type) { + case *bc.Dpos: + from = d.From + to = d.To + dpos = d + stake = d.Stake + default: + continue + } + + if len(dpos.Data) >= len(vaporPrefix) { + txData := dpos.Data + txDataInfo := strings.Split(txData, ":") + if len(txDataInfo) >= vaporMinSplitLen && txDataInfo[posPrefix] == vaporPrefix && txDataInfo[posVersion] == vaporVersion { + switch txDataInfo[posCategory] { + case vaporCategoryEvent: + if len(txDataInfo) > vaporMinSplitLen { + if txDataInfo[posEventVote] == vaporEventVote && (!candidateNeedPD || snap.isCandidate(to)) { + headerExtra.CurrentBlockVotes = d.processEventVote(headerExtra.CurrentBlockVotes, stake, from, to) + } else if txDataInfo[posEventConfirm] == vaporEventConfirm { + headerExtra.CurrentBlockConfirmations = d.processEventConfirm(headerExtra.CurrentBlockConfirmations, c, txDataInfo, height, tx, from) + } else if txDataInfo[posEventProposal] == vaporEventPorposal && snap.isCandidate(from) { + headerExtra.CurrentBlockProposals = d.processEventProposal(headerExtra.CurrentBlockProposals, txDataInfo, tx, from) + } else if txDataInfo[posEventDeclare] == vaporEventDeclare && snap.isCandidate(from) { + headerExtra.CurrentBlockDeclares = 
d.processEventDeclare(headerExtra.CurrentBlockDeclares, txDataInfo, tx, from) + + } + } else { + // todo : something wrong, leave this transaction to process as normal transaction + } + + case vaporCategoryLog: + // todo : + case vaporCategorySC: + // todo : + } + } + } + /* + if height > 1 { + headerExtra.ModifyPredecessorVotes = d.processPredecessorVoter(headerExtra.ModifyPredecessorVotes, stake, from, to, snap) + } + */ + } + } + + return headerExtra, nil +} + +func (d *Dpos) processEventProposal(currentBlockProposals []Proposal, txDataInfo []string, tx *bc.Tx, proposer string) []Proposal { + proposal := Proposal{ + Hash: tx.ID, + ValidationLoopCnt: defaultValidationLoopCnt, + ImplementNumber: uint64(1), + ProposalType: proposalTypeCandidateAdd, + Proposer: proposer, + MinerRewardPerThousand: minerRewardPerThousand, + Declares: []*Declare{}, + ReceivedNumber: uint64(0), + } + + for i := 0; i < len(txDataInfo[posEventProposal+1:])/2; i++ { + k, v := txDataInfo[posEventProposal+1+i*2], txDataInfo[posEventProposal+2+i*2] + switch k { + case "vlcnt": + // If vlcnt is missing then user default value, but if the vlcnt is beyond the min/max value then ignore this proposal + if validationLoopCnt, err := strconv.Atoi(v); err != nil || validationLoopCnt < minValidationLoopCnt || validationLoopCnt > maxValidationLoopCnt { + return currentBlockProposals + } else { + proposal.ValidationLoopCnt = uint64(validationLoopCnt) + } + case "implement_number": + if implementNumber, err := strconv.Atoi(v); err != nil || implementNumber <= 0 { + return currentBlockProposals + } else { + proposal.ImplementNumber = uint64(implementNumber) + } + case "proposal_type": + if proposalType, err := strconv.Atoi(v); err != nil || (proposalType != proposalTypeCandidateAdd && proposalType != proposalTypeCandidateRemove && proposalType != proposalTypeMinerRewardDistributionModify) { + return currentBlockProposals + } else { + proposal.ProposalType = uint64(proposalType) + } + case "candidate": + // not check here + //proposal.Candidate.UnmarshalText([]byte(v)) + /* + address, err := common.DecodeAddress(v, &consensus.ActiveNetParams) + if err != nil { + return currentBlockProposals + } + */ + proposal.Candidate = v + case "mrpt": + // miner reward per thousand + if mrpt, err := strconv.Atoi(v); err != nil || mrpt < 0 || mrpt > 1000 { + return currentBlockProposals + } else { + proposal.MinerRewardPerThousand = uint64(mrpt) + } + + } + } + + return append(currentBlockProposals, proposal) +} + +func (d *Dpos) processEventDeclare(currentBlockDeclares []Declare, txDataInfo []string, tx *bc.Tx, declarer string) []Declare { + declare := Declare{ + ProposalHash: bc.Hash{}, + Declarer: declarer, + Decision: true, + } + + for i := 0; i < len(txDataInfo[posEventDeclare+1:])/2; i++ { + k, v := txDataInfo[posEventDeclare+1+i*2], txDataInfo[posEventDeclare+2+i*2] + switch k { + case "hash": + declare.ProposalHash.UnmarshalText([]byte(v)) + case "decision": + if v == "yes" { + declare.Decision = true + } else if v == "no" { + declare.Decision = false + } else { + return currentBlockDeclares + } + } + } + + return append(currentBlockDeclares, declare) +} + +func (d *Dpos) processEventVote(currentBlockVotes []Vote, stake uint64, voter, to string) []Vote { + + //if new(big.Int).SetUint64(stake).Cmp(d.config.MinVoterBalance) > 0 { + currentBlockVotes = append(currentBlockVotes, Vote{ + Voter: voter, + Candidate: to, + Stake: stake, + }) + //} + return currentBlockVotes +} + +func (d *Dpos) processEventConfirm(currentBlockConfirmations 
[]Confirmation, c chain.Chain, txDataInfo []string, number uint64, tx *bc.Tx, confirmer string) []Confirmation { + if len(txDataInfo) > posEventConfirmNumber { + confirmedBlockNumber, err := strconv.Atoi(txDataInfo[posEventConfirmNumber]) + if err != nil || number-uint64(confirmedBlockNumber) > d.config.MaxSignerCount || number-uint64(confirmedBlockNumber) < 0 { + return currentBlockConfirmations + } + confirmedHeader, err := c.GetBlockByHeight(uint64(confirmedBlockNumber)) + if confirmedHeader == nil { + log.Info("Fail to get confirmedHeader") + return currentBlockConfirmations + } + confirmedHeaderExtra := HeaderExtra{} + if extraVanity+extraSeal > len(confirmedHeader.Extra) { + return currentBlockConfirmations + } + //err = rlp.DecodeBytes(confirmedHeader.Extra[extraVanity:len(confirmedHeader.Extra)-extraSeal], &confirmedHeaderExtra) + if err := json.Unmarshal(confirmedHeader.Extra[extraVanity:len(confirmedHeader.Extra)-extraSeal], &confirmedHeaderExtra); err != nil { + log.Info("Fail to decode parent header", "err", err) + return currentBlockConfirmations + } + for _, s := range confirmedHeaderExtra.SignerQueue { + if s == confirmer { + currentBlockConfirmations = append(currentBlockConfirmations, Confirmation{ + Signer: confirmer, + BlockNumber: uint64(confirmedBlockNumber), + }) + break + } + } + } + + return currentBlockConfirmations +} + +func (d *Dpos) processPredecessorVoter(modifyPredecessorVotes []Vote, stake uint64, voter, to string, snap *Snapshot) []Vote { + if stake > 0 { + if snap.isVoter(voter) { + modifyPredecessorVotes = append(modifyPredecessorVotes, Vote{ + Voter: voter, + Candidate: "", + Stake: stake, + }) + } + if snap.isVoter(to) { + modifyPredecessorVotes = append(modifyPredecessorVotes, Vote{ + Voter: to, + Candidate: "", + Stake: stake, + }) + } + + } + return modifyPredecessorVotes +} diff --git a/consensus/consensus/dpos/dpos.go b/consensus/consensus/dpos/dpos.go new file mode 100644 index 00000000..3ea6220d --- /dev/null +++ b/consensus/consensus/dpos/dpos.go @@ -0,0 +1,659 @@ +package dpos + +import ( + "bytes" + "encoding/json" + "errors" + "math/big" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru" + log "github.com/sirupsen/logrus" + "github.com/vapor/chain" + "github.com/vapor/common" + "github.com/vapor/config" + "github.com/vapor/consensus" + "github.com/vapor/crypto" + "github.com/vapor/crypto/ed25519/chainkd" + "github.com/vapor/protocol" + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" + "github.com/vapor/protocol/vm/vmutil" +) + +const ( + inMemorySnapshots = 128 // Number of recent vote snapshots to keep in memory + inMemorySignatures = 4096 // Number of recent block signatures to keep in memory + secondsPerYear = 365 * 24 * 3600 // Number of seconds for one year + checkpointInterval = 360 // About N hours if config.period is N + module = "dpos" +) + +//delegated-proof-of-stake protocol constants. 
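
The custom-transaction handlers in custom_tx.go above all key off a colon-separated data string of the form vapor:version:category:action[:args...]. A self-contained sketch of that convention, stdlib only, with illustrative names:

package main

import (
	"fmt"
	"strings"
)

// classifyVaporData mirrors the split-and-dispatch done by processCustomTx.
func classifyVaporData(data string) string {
	parts := strings.Split(data, ":")
	if len(parts) < 3 || parts[0] != "vapor" || parts[1] != "1" {
		return "not a vapor custom tx"
	}
	switch parts[2] {
	case "event":
		if len(parts) > 3 {
			return "event: " + parts[3] // vote / confirm / proposal / declare
		}
	case "oplog", "sc":
		return "category " + parts[2] + " (still todo in this commit)"
	}
	return "unrecognized event or category"
}

func main() {
	fmt.Println(classifyVaporData("vapor:1:event:vote"))
	fmt.Println(classifyVaporData("vapor:1:event:confirm:123"))
}
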
+var ( + SignerBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block first year + defaultEpochLength = uint64(3000000) // Default number of blocks after which vote's period of validity + defaultBlockPeriod = uint64(3) // Default minimum difference between two consecutive block's timestamps + defaultMaxSignerCount = uint64(21) // + //defaultMinVoterBalance = new(big.Int).Mul(big.NewInt(10000), big.NewInt(1e+18)) + defaultMinVoterBalance = uint64(0) + extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity + extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal + defaultDifficulty = big.NewInt(1) // Default difficulty + defaultLoopCntRecalculateSigners = uint64(10) // Default loop count to recreate signers from top tally + minerRewardPerThousand = uint64(618) // Default reward for miner in each block from block reward (618/1000) + candidateNeedPD = false // is new candidate need Proposal & Declare process +) + +var ( + // errUnknownBlock is returned when the list of signers is requested for a block + // that is not part of the local blockchain. + errUnknownBlock = errors.New("unknown block") + + // errMissingVanity is returned if a block's extra-data section is shorter than + // 32 bytes, which is required to store the signer vanity. + errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing") + + // errMissingSignature is returned if a block's extra-data section doesn't seem + // to contain a 65 byte secp256k1 signature. + errMissingSignature = errors.New("extra-data 65 byte suffix signature missing") + + // errInvalidMixDigest is returned if a block's mix digest is non-zero. + errInvalidMixDigest = errors.New("non-zero mix digest") + + // errInvalidUncleHash is returned if a block contains an non-empty uncle list. + errInvalidUncleHash = errors.New("non empty uncle hash") + + // ErrInvalidTimestamp is returned if the timestamp of a block is lower than + // the previous block's timestamp + the minimum block period. + ErrInvalidTimestamp = errors.New("invalid timestamp") + + // errInvalidVotingChain is returned if an authorization list is attempted to + // be modified via out-of-range or non-contiguous headers. + errInvalidVotingChain = errors.New("invalid voting chain") + + // errUnauthorized is returned if a header is signed by a non-authorized entity. + errUnauthorized = errors.New("unauthorized") + + // errPunishedMissing is returned if a header calculate punished signer is wrong. + errPunishedMissing = errors.New("punished signer missing") + + // errWaitTransactions is returned if an empty block is attempted to be sealed + // on an instant chain (0 second period). It's important to refuse these as the + // block reward is zero, so an empty block just bloats the chain... fast. 
+ errWaitTransactions = errors.New("waiting for transactions") + + // errUnclesNotAllowed is returned if uncles exists + errUnclesNotAllowed = errors.New("uncles not allowed") + + // errCreateSignerQueueNotAllowed is returned if called in (block number + 1) % maxSignerCount != 0 + errCreateSignerQueueNotAllowed = errors.New("create signer queue not allowed") + + // errInvalidSignerQueue is returned if verify SignerQueue fail + errInvalidSignerQueue = errors.New("invalid signer queue") + + // errSignerQueueEmpty is returned if no signer when calculate + errSignerQueueEmpty = errors.New("signer queue is empty") +) + +type Dpos struct { + config *config.DposConfig // Consensus engine configuration parameters + store protocol.Store // Database to store and retrieve snapshot checkpoints + recents *lru.ARCCache // Snapshots for recent block to speed up reorgs + signatures *lru.ARCCache // Signatures of recent blocks to speed up mining + signer string // Ethereum address of the signing key + signFn SignerFn // Signer function to authorize hashes with + signTxFn SignTxFn // Sign transaction function to sign tx + lock sync.RWMutex // Protects the signer fields + lcsc uint64 // Last confirmed side chain +} + +// SignerFn is a signer callback function to request a hash to be signed by a backing account. +type SignerFn func(string, []byte) ([]byte, error) + +// SignTxFn is a signTx +type SignTxFn func(string, *bc.Tx, *big.Int) (*bc.Tx, error) + +// +func ecrecover(header *types.BlockHeader, sigcache *lru.ARCCache, c chain.Chain) (string, error) { + + xpub := &chainkd.XPub{} + xpub.UnmarshalText(header.Coinbase) + derivedPK := xpub.PublicKey() + pubHash := crypto.Ripemd160(derivedPK) + address, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams) + if err != nil { + return "", err + } + + return address.EncodeAddress(), nil +} + +// +func New(config *config.DposConfig, store protocol.Store) *Dpos { + conf := *config + if conf.Epoch == 0 { + conf.Epoch = defaultEpochLength + } + if conf.Period == 0 { + conf.Period = defaultBlockPeriod + } + if conf.MaxSignerCount == 0 { + conf.MaxSignerCount = defaultMaxSignerCount + } + if conf.MinVoterBalance == 0 { + conf.MinVoterBalance = defaultMinVoterBalance + } + // Allocate the snapshot caches and create the engine + recents, _ := lru.NewARC(inMemorySnapshots) + signatures, _ := lru.NewARC(inMemorySignatures) + return &Dpos{ + config: &conf, + store: store, + recents: recents, + signatures: signatures, + } +} + +// Authorize injects a private key into the consensus engine to mint new blocks with. 
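
New above and Authorize below are the engine's entry points. A hedged sketch of how they could be wired together; the actual call site is outside this excerpt, and passing the configured coinbase address as the signer is an assumption:

package example // hypothetical placement

import (
	"github.com/vapor/config"
	"github.com/vapor/consensus/consensus/dpos"
	"github.com/vapor/protocol"
)

// newDposEngine sketches construction of the engine from the loaded config.
func newDposEngine(store protocol.Store) *dpos.Dpos {
	dposCfg := config.CommonConfig.Consensus.Dpos
	engine := dpos.New(dposCfg, store)
	// Authorize records the local signer identity; here assumed to be the
	// configured coinbase address.
	engine.Authorize(dposCfg.Coinbase)
	return engine
}
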
+func (d *Dpos) Authorize(signer string /*, signFn SignerFn*/) { + d.lock.Lock() + defer d.lock.Unlock() + + d.signer = signer + //d.signFn = signFn +} + +// 从BLockHeader中获取到地址 +func (d *Dpos) Author(header *types.BlockHeader, c chain.Chain) (string, error) { + return ecrecover(header, d.signatures, c) +} + +func (d *Dpos) VerifyHeader(c chain.Chain, header *types.BlockHeader, seal bool) error { + return d.verifyCascadingFields(c, header, nil) +} + +func (d *Dpos) VerifyHeaders(c chain.Chain, headers []*types.BlockHeader, seals []bool) (chan<- struct{}, <-chan error) { + return nil, nil +} + +func (d *Dpos) VerifySeal(c chain.Chain, header *types.BlockHeader) error { + return nil +} + +func (d *Dpos) verifyHeader(c chain.Chain, header *types.BlockHeader, parents []*types.BlockHeader) error { + return nil +} + +func (d *Dpos) verifyCascadingFields(c chain.Chain, header *types.BlockHeader, parents []*types.BlockHeader) error { + // The genesis block is the always valid dead-end + height := header.Height + if height == 0 { + return nil + } + + var ( + parent *types.BlockHeader + err error + ) + + if len(parents) > 0 { + parent = parents[len(parents)-1] + } else { + parent, err = c.GetHeaderByHeight(height - 1) + if err != nil { + return err + } + } + + if parent == nil { + return errors.New("unknown ancestor") + } + + if _, err = d.snapshot(c, height-1, header.PreviousBlockHash, parents, nil, defaultLoopCntRecalculateSigners); err != nil { + return err + } + + return d.verifySeal(c, header, parents) +} + +// verifySeal checks whether the signature contained in the header satisfies the +// consensus protocol requirements. The method accepts an optional list of parent +// headers that aren't yet part of the local blockchain to generate the snapshots +// from. 
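
For clarity, the derivation used by ecrecover above, and repeated by the verifySeal function below for the parent and current headers, is just xpub text, then RIPEMD-160 of the public key, then a P2WPKH address. A standalone sketch with an illustrative helper name:

package example // hypothetical placement

import (
	"github.com/vapor/common"
	"github.com/vapor/consensus"
	"github.com/vapor/crypto"
	"github.com/vapor/crypto/ed25519/chainkd"
)

// coinbaseAddress restates the coinbase-to-address derivation: the block
// header's Coinbase field carries the signer's xpub in text form.
func coinbaseAddress(coinbase []byte) (string, error) {
	xpub := &chainkd.XPub{}
	if err := xpub.UnmarshalText(coinbase); err != nil {
		return "", err
	}
	pubHash := crypto.Ripemd160(xpub.PublicKey())
	addr, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
	if err != nil {
		return "", err
	}
	return addr.EncodeAddress(), nil
}
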
+func (d *Dpos) verifySeal(c chain.Chain, header *types.BlockHeader, parents []*types.BlockHeader) error {
+	height := header.Height
+	if height == 0 {
+		return errUnknownBlock
+	}
+	// Retrieve the snapshot needed to verify this header and cache it
+	snap, err := d.snapshot(c, height-1, header.PreviousBlockHash, parents, nil, defaultLoopCntRecalculateSigners)
+	if err != nil {
+		return err
+	}
+
+	// Resolve the authorization key and check against signers
+	signer, err := ecrecover(header, d.signatures, c)
+	if err != nil {
+		return err
+	}
+
+	if height > d.config.MaxSignerCount {
+		var (
+			parent *types.BlockHeader
+			err    error
+		)
+		if len(parents) > 0 {
+			parent = parents[len(parents)-1]
+		} else {
+			if parent, err = c.GetHeaderByHeight(height - 1); err != nil {
+				return err
+			}
+		}
+
+		// parent coinbase address
+		xpub := &chainkd.XPub{}
+		xpub.UnmarshalText(parent.Coinbase)
+		derivedPK := xpub.PublicKey()
+		pubHash := crypto.Ripemd160(derivedPK)
+		parentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+		if err != nil {
+			return err
+		}
+
+		// current coinbase address
+		xpub.UnmarshalText(header.Coinbase)
+		derivedPK = xpub.PublicKey()
+		pubHash = crypto.Ripemd160(derivedPK)
+		currentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+		if err != nil {
+			return err
+		}
+
+		parentHeaderExtra := HeaderExtra{}
+		if err = json.Unmarshal(parent.Extra[extraVanity:len(parent.Extra)-extraSeal], &parentHeaderExtra); err != nil {
+			return err
+		}
+
+		currentHeaderExtra := HeaderExtra{}
+		if err = json.Unmarshal(header.Extra[extraVanity:len(header.Extra)-extraSeal], &currentHeaderExtra); err != nil {
+			return err
+		}
+
+		// verify the signer queue
+		if height%d.config.MaxSignerCount == 0 {
+			err := snap.verifySignerQueue(currentHeaderExtra.SignerQueue)
+			if err != nil {
+				return err
+			}
+		} else {
+			for i := 0; i < int(d.config.MaxSignerCount); i++ {
+				if parentHeaderExtra.SignerQueue[i] != currentHeaderExtra.SignerQueue[i] {
+					return errInvalidSignerQueue
+				}
+			}
+		}
+		// verify missing signers for punishment
+		parentSignerMissing := getSignerMissing(parentCoinbase.EncodeAddress(), currentCoinbase.EncodeAddress(), parentHeaderExtra)
+		if len(parentSignerMissing) != len(currentHeaderExtra.SignerMissing) {
+			return errPunishedMissing
+		}
+		for i, signerMissing := range currentHeaderExtra.SignerMissing {
+			if parentSignerMissing[i] != signerMissing {
+				return errPunishedMissing
+			}
+		}
+	}
+
+	if !snap.inturn(signer, header.Timestamp) {
+		return errUnauthorized
+	}
+
+	return nil
+}
+
+// Prepare implements consensus.Engine, preparing all the consensus fields of the header for running the transactions on top.
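Editor's note: verifySeal relies on the extra-data layout vanity(32) | json(HeaderExtra) | seal(65). Below is a stdlib-only round trip of that layout with a reduced stand-in for HeaderExtra (only fields visible in this diff), to make the slicing explicit; it is a sketch, not the project's type.

package main

import (
	"encoding/json"
	"fmt"
)

// Reduced stand-in for HeaderExtra, carrying only fields this diff shows.
type headerExtra struct {
	SignerQueue          []string
	SignerMissing        []string
	ConfirmedBlockNumber uint64
	LoopStartTime        uint64
}

const (
	extraVanity = 32 // bytes reserved for signer vanity
	extraSeal   = 65 // bytes reserved for the seal
)

func main() {
	body, _ := json.Marshal(headerExtra{SignerQueue: []string{"a", "b", "c"}, LoopStartTime: 1546000000})

	// header.Extra = vanity | json(HeaderExtra) | seal
	extra := append(make([]byte, extraVanity), body...)
	extra = append(extra, make([]byte, extraSeal)...)

	// verifySeal and apply decode by slicing the fixed prefix and suffix off.
	decoded := headerExtra{}
	if err := json.Unmarshal(extra[extraVanity:len(extra)-extraSeal], &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.SignerQueue, decoded.LoopStartTime) // [a b c] 1546000000
}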
+func (d *Dpos) Prepare(c chain.Chain, header *types.BlockHeader) error { + if d.config.GenesisTimestamp < uint64(time.Now().Unix()) { + return nil + } + + if header.Height == 1 { + for { + delay := time.Unix(int64(d.config.GenesisTimestamp-2), 0).Sub(time.Now()) + if delay <= time.Duration(0) { + log.WithFields(log.Fields{"module": module, "time": time.Now()}).Info("Ready for seal block") + break + } else if delay > time.Duration(d.config.Period)*time.Second { + delay = time.Duration(d.config.Period) * time.Second + } + log.WithFields(log.Fields{"module": module, "delay": time.Duration(time.Unix(int64(d.config.GenesisTimestamp-2), 0).Sub(time.Now()))}).Info("Waiting for seal block") + select { + case <-time.After(delay): + continue + } + } + } + return nil +} + +func (d *Dpos) Finalize(c chain.Chain, header *types.BlockHeader, txs []*bc.Tx) error { + height := header.Height + parent, err := c.GetHeaderByHeight(height - 1) + if parent == nil { + return err + } + //parent + var xpub chainkd.XPub + xpub.UnmarshalText(parent.Coinbase) + pubHash := crypto.Ripemd160(xpub.PublicKey()) + parentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams) + if err != nil { + return err + } + + //current + xpub.UnmarshalText(header.Coinbase) + pubHash = crypto.Ripemd160(xpub.PublicKey()) + currentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams) + if err != nil { + return err + } + + //header.Timestamp + t := new(big.Int).Add(new(big.Int).SetUint64(parent.Timestamp), new(big.Int).SetUint64(d.config.Period)) + header.Timestamp = t.Uint64() + + if header.Timestamp < uint64(time.Now().Unix()) { + header.Timestamp = uint64(time.Now().Unix()) + } + + if len(header.Extra) < extraVanity { + header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...) 
+ } + + header.Extra = header.Extra[:extraVanity] + // genesisVotes write direct into snapshot, which number is 1 + var genesisVotes []*Vote + parentHeaderExtra := HeaderExtra{} + currentHeaderExtra := HeaderExtra{} + if height == 1 { + alreadyVote := make(map[string]struct{}) + for _, voter := range d.config.SelfVoteSigners { + if _, ok := alreadyVote[voter]; !ok { + genesisVotes = append(genesisVotes, &Vote{ + Voter: voter, + Candidate: voter, + Stake: 0, + //Stake: state.GetBalance(voter), + }) + alreadyVote[voter] = struct{}{} + } + } + } else { + parentHeaderExtraByte := parent.Extra[extraVanity : len(parent.Extra)-extraSeal] + if err := json.Unmarshal(parentHeaderExtraByte, &parentHeaderExtra); err != nil { + return err + } + currentHeaderExtra.ConfirmedBlockNumber = parentHeaderExtra.ConfirmedBlockNumber + currentHeaderExtra.SignerQueue = parentHeaderExtra.SignerQueue + currentHeaderExtra.LoopStartTime = parentHeaderExtra.LoopStartTime + currentHeaderExtra.SignerMissing = getSignerMissing(parentCoinbase.EncodeAddress(), currentCoinbase.EncodeAddress(), parentHeaderExtra) + } + + // calculate votes write into header.extra + currentHeaderExtra, err = d.processCustomTx(currentHeaderExtra, c, header, txs) + if err != nil { + return err + } + // Assemble the voting snapshot to check which votes make sense + snap, err := d.snapshot(c, height-1, header.PreviousBlockHash, nil, genesisVotes, defaultLoopCntRecalculateSigners) + if err != nil { + return err + } + + currentHeaderExtra.ConfirmedBlockNumber = snap.getLastConfirmedBlockNumber(currentHeaderExtra.CurrentBlockConfirmations).Uint64() + // write signerQueue in first header, from self vote signers in genesis block + if height == 1 { + currentHeaderExtra.LoopStartTime = d.config.GenesisTimestamp + for i := 0; i < int(d.config.MaxSignerCount); i++ { + currentHeaderExtra.SignerQueue = append(currentHeaderExtra.SignerQueue, d.config.SelfVoteSigners[i%len(d.config.SelfVoteSigners)]) + } + } + if height%d.config.MaxSignerCount == 0 { + //currentHeaderExtra.LoopStartTime = header.Time.Uint64() + currentHeaderExtra.LoopStartTime = currentHeaderExtra.LoopStartTime + d.config.Period*d.config.MaxSignerCount + // create random signersQueue in currentHeaderExtra by snapshot.Tally + currentHeaderExtra.SignerQueue = []string{} + newSignerQueue, err := snap.createSignerQueue() + if err != nil { + return err + } + + currentHeaderExtra.SignerQueue = newSignerQueue + + } + // encode header.extra + currentHeaderExtraEnc, err := json.Marshal(currentHeaderExtra) + if err != nil { + return err + } + header.Extra = append(header.Extra, currentHeaderExtraEnc...) + header.Extra = append(header.Extra, make([]byte, extraSeal)...) 
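Editor's note: two timing rules from Finalize above, restated with concrete numbers and stdlib only. At height 1 the signer queue is filled round-robin from SelfVoteSigners; at every height that is a multiple of MaxSignerCount, LoopStartTime advances by Period*MaxSignerCount. The addresses are hypothetical placeholders.

package main

import "fmt"

func main() {
	const period, maxSignerCount = uint64(3), uint64(21)
	selfVoteSigners := []string{"vsm1...a", "vsm1...b", "vsm1...c"} // hypothetical addresses

	// Height 1: SignerQueue[i] = SelfVoteSigners[i % len(SelfVoteSigners)]
	var queue []string
	for i := 0; i < int(maxSignerCount); i++ {
		queue = append(queue, selfVoteSigners[i%len(selfVoteSigners)])
	}
	fmt.Println(len(queue), queue[0], queue[20]) // 21 vsm1...a vsm1...c

	// Height % MaxSignerCount == 0: the next loop starts 3*21 = 63 seconds later.
	loopStartTime := uint64(1546000000)
	loopStartTime += period * maxSignerCount
	fmt.Println(loopStartTime) // 1546000063
}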
+ return nil +} + +func (d *Dpos) Seal(c chain.Chain, block *types.Block) (*types.Block, error) { + header := block.BlockHeader + height := header.Height + if height == 0 { + return nil, errUnknownBlock + } + + if d.config.Period == 0 && len(block.Transactions) == 0 { + return nil, errWaitTransactions + } + // Bail out if we're unauthorized to sign a block + snap, err := d.snapshot(c, height-1, header.PreviousBlockHash, nil, nil, defaultLoopCntRecalculateSigners) + if err != nil { + return nil, err + } + if !snap.inturn(d.signer, header.Timestamp) { + return nil, errUnauthorized + } + + var xPrv chainkd.XPrv + if config.CommonConfig.Consensus.Dpos.XPrv == "" { + return nil, errors.New("Signer is empty") + } + xPrv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)) + sign := xPrv.Sign(block.BlockCommitment.TransactionsMerkleRoot.Bytes()) + pubHash := crypto.Ripemd160(xPrv.XPub().PublicKey()) + + control, err := vmutil.P2WPKHProgram([]byte(pubHash)) + if err != nil { + return nil, err + } + + block.Proof = types.Proof{Sign: sign, ControlProgram: control} + return block, nil +} + +func (d *Dpos) IsSealer(c chain.Chain, hash bc.Hash, header *types.BlockHeader, headerTime uint64) (bool, error) { + var ( + snap *Snapshot + headers []*types.BlockHeader + ) + h := hash + height := header.Height + for snap == nil { + // If an in-memory snapshot was found, use that + if s, ok := d.recents.Get(h); ok { + snap = s.(*Snapshot) + break + } + // If an on-disk checkpoint snapshot can be found, use that + if height%checkpointInterval == 0 { + if s, err := loadSnapshot(d.config, d.signatures, d.store, h); err == nil { + log.WithFields(log.Fields{"func": "IsSealer", "number": height, "hash": h}).Warn("Loaded voting snapshot from disk") + snap = s + break + } else { + log.Warn("loadSnapshot:", err) + } + } + + if height == 0 { + genesis, err := c.GetHeaderByHeight(0) + if err != nil { + return false, err + } + var genesisVotes []*Vote + alreadyVote := make(map[string]struct{}) + for _, voter := range d.config.SelfVoteSigners { + if _, ok := alreadyVote[voter]; !ok { + genesisVotes = append(genesisVotes, &Vote{ + Voter: voter, + Candidate: voter, + Stake: 0, + //Stake: state.GetBalance(voter), + }) + alreadyVote[voter] = struct{}{} + } + } + snap = newSnapshot(d.config, d.signatures, genesis.Hash(), genesisVotes, defaultLoopCntRecalculateSigners) + if err := snap.store(d.store); err != nil { + return false, err + } + log.Info("Stored genesis voting snapshot to disk") + break + } + + header, err := c.GetHeaderByHeight(height) + if header == nil || err != nil { + return false, errors.New("unknown ancestor") + } + + height, h = height-1, header.PreviousBlockHash + } + + snap, err := snap.apply(headers) + if err != nil { + return false, err + } + + d.recents.Add(snap.Hash, snap) + + if snap != nil { + loopIndex := int((headerTime-snap.LoopStartTime)/snap.config.Period) % len(snap.Signers) + if loopIndex >= len(snap.Signers) { + return false, nil + } else if *snap.Signers[loopIndex] != d.signer { + return false, nil + + } + return true, nil + } else { + return false, nil + } +} + +// snapshot retrieves the authorization snapshot at a given point in time. 
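Editor's note: IsSealer above (and Snapshot.inturn later in this diff) map a timestamp to a queue slot with the same arithmetic. A worked stdlib example with made-up numbers:

package main

import "fmt"

func main() {
	signers := []string{"A", "B", "C"} // hypothetical signer queue
	const period = uint64(3)
	loopStartTime := uint64(1546000000)
	headerTime := uint64(1546000010) // 10 seconds into the loop

	// 10/3 = 3 elapsed slots, 3 % 3 signers = slot 0, so "A" is in turn.
	loopIndex := int((headerTime-loopStartTime)/period) % len(signers)
	fmt.Println(loopIndex, signers[loopIndex]) // 0 A
}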
+func (d *Dpos) snapshot(c chain.Chain, number uint64, hash bc.Hash, parents []*types.BlockHeader, genesisVotes []*Vote, lcrs uint64) (*Snapshot, error) { + + var ( + headers []*types.BlockHeader + snap *Snapshot + ) + h := hash + + for snap == nil { + // If an in-memory snapshot was found, use that + if s, ok := d.recents.Get(h); ok { + snap = s.(*Snapshot) + break + } + // If an on-disk checkpoint snapshot can be found, use that + if number%checkpointInterval == 0 { + if s, err := loadSnapshot(d.config, d.signatures, d.store, h); err == nil { + log.WithFields(log.Fields{"number": number, "hash": h}).Warn("Loaded voting snapshot from disk") + snap = s + break + } + } + if number == 0 { + genesis, err := c.GetHeaderByHeight(0) + if err != nil { + return nil, err + } + if err := d.VerifyHeader(c, genesis, false); err != nil { + return nil, err + } + + snap = newSnapshot(d.config, d.signatures, genesis.Hash(), genesisVotes, lcrs) + if err := snap.store(d.store); err != nil { + return nil, err + } + log.Info("Stored genesis voting snapshot to disk") + break + } + var header *types.BlockHeader + if len(parents) > 0 { + header = parents[len(parents)-1] + if header.Hash() != h || header.Height != number { + return nil, errors.New("unknown ancestor") + } + parents = parents[:len(parents)-1] + } else { + var err error + header, err = c.GetHeaderByHeight(number) + if header == nil || err != nil { + return nil, errors.New("unknown ancestor") + } + } + headers = append(headers, header) + number, h = number-1, header.PreviousBlockHash + } + + // Previous snapshot found, apply any pending headers on top of it + for i := 0; i < len(headers)/2; i++ { + headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i] + } + snap, err := snap.apply(headers) + if err != nil { + return nil, err + } + d.recents.Add(snap.Hash, snap) + + // If we've generated a new checkpoint snapshot, save to disk + if snap.Number%checkpointInterval == 0 && len(headers) > 0 { + if err = snap.store(d.store); err != nil { + return nil, err + } + log.Info("Stored voting snapshot to disk", "number", snap.Number, "hash", snap.Hash) + } + return snap, err +} + +// Get the signer missing from last signer till header.Coinbase +func getSignerMissing(lastSigner string, currentSigner string, extra HeaderExtra) []string { + + var signerMissing []string + recordMissing := false + for _, signer := range extra.SignerQueue { + if signer == lastSigner { + recordMissing = true + continue + } + if signer == currentSigner { + break + } + if recordMissing { + signerMissing = append(signerMissing, signer) + } + } + return signerMissing +} diff --git a/consensus/consensus/dpos/signer_queue.go b/consensus/consensus/dpos/signer_queue.go new file mode 100644 index 00000000..5d3d440d --- /dev/null +++ b/consensus/consensus/dpos/signer_queue.go @@ -0,0 +1,154 @@ +package dpos + +import ( + "bytes" + "sort" + + "github.com/vapor/protocol/bc" +) + +type TallyItem struct { + addr string + stake uint64 +} + +type TallySlice []TallyItem + +func (s TallySlice) Len() int { return len(s) } +func (s TallySlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s TallySlice) Less(i, j int) bool { + //we need sort reverse, so ... 
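Editor's note: a worked call of the getSignerMissing helper above (it assumes the same dpos package context). With queue [A B C D E], a parent sealed by B and the current block sealed by E, the skipped signers are C and D, which is exactly what verifySeal compares against HeaderExtra.SignerMissing.

func exampleSignerMissing() []string {
	extra := HeaderExtra{SignerQueue: []string{"A", "B", "C", "D", "E"}}
	return getSignerMissing("B", "E", extra) // ["C", "D"]
}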
+ if s[i].stake > s[j].stake { + return true + + } else if s[i].stake < s[j].stake { + return false + } + // if the stake equal + //return bytes.Compare(s[i].addr.ScriptAddress(), s[j].addr.ScriptAddress()) > 0 + return s[i].addr > s[j].addr +} + +type SignerItem struct { + addr string + hash bc.Hash +} + +type SignerSlice []SignerItem + +func (s SignerSlice) Len() int { return len(s) } +func (s SignerSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s SignerSlice) Less(i, j int) bool { + return bytes.Compare(s[i].hash.Bytes(), s[j].hash.Bytes()) > 0 +} + +// verify the SignerQueue base on block hash +func (s *Snapshot) verifySignerQueue(signerQueue []string) error { + + if len(signerQueue) > int(s.config.MaxSignerCount) { + return errInvalidSignerQueue + } + sq, err := s.createSignerQueue() + if err != nil { + return err + } + if len(sq) == 0 || len(sq) != len(signerQueue) { + return errInvalidSignerQueue + } + for i, signer := range signerQueue { + if signer != sq[i] { + return errInvalidSignerQueue + } + } + + return nil +} + +func (s *Snapshot) buildTallySlice() TallySlice { + var tallySlice TallySlice + for address, stake := range s.Tally { + if !candidateNeedPD || s.isCandidate(address) { + if _, ok := s.Punished[address]; ok { + var creditWeight uint64 + if s.Punished[address] > defaultFullCredit-minCalSignerQueueCredit { + creditWeight = minCalSignerQueueCredit + } else { + creditWeight = defaultFullCredit - s.Punished[address] + } + tallySlice = append(tallySlice, TallyItem{address, stake * creditWeight}) + } else { + tallySlice = append(tallySlice, TallyItem{address, stake * defaultFullCredit}) + } + } + } + return tallySlice +} + +func (s *Snapshot) createSignerQueue() ([]string, error) { + + if (s.Number+1)%s.config.MaxSignerCount != 0 || s.Hash != s.HistoryHash[len(s.HistoryHash)-1] { + return nil, errCreateSignerQueueNotAllowed + } + + var signerSlice SignerSlice + var topStakeAddress []string + if (s.Number+1)%(s.config.MaxSignerCount*s.LCRS) == 0 { + // before recalculate the signers, clear the candidate is not in snap.Candidates + + // only recalculate signers from to tally per 10 loop, + // other loop end just reset the order of signers by block hash (nearly random) + tallySlice := s.buildTallySlice() + sort.Sort(TallySlice(tallySlice)) + queueLength := int(s.config.MaxSignerCount) + if queueLength > len(tallySlice) { + queueLength = len(tallySlice) + } + if queueLength == defaultOfficialMaxSignerCount && len(tallySlice) > defaultOfficialThirdLevelCount { + for i, tallyItem := range tallySlice[:defaultOfficialFirstLevelCount] { + signerSlice = append(signerSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]}) + } + var signerSecondLevelSlice, signerThirdLevelSlice, signerLastLevelSlice SignerSlice + // 60% + for i, tallyItem := range tallySlice[defaultOfficialFirstLevelCount:defaultOfficialSecondLevelCount] { + signerSecondLevelSlice = append(signerSecondLevelSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]}) + } + sort.Sort(SignerSlice(signerSecondLevelSlice)) + signerSlice = append(signerSlice, signerSecondLevelSlice[:6]...) + // 40% + for i, tallyItem := range tallySlice[defaultOfficialSecondLevelCount:defaultOfficialThirdLevelCount] { + signerThirdLevelSlice = append(signerThirdLevelSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]}) + } + sort.Sort(SignerSlice(signerThirdLevelSlice)) + signerSlice = append(signerSlice, signerThirdLevelSlice[:4]...) 
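Editor's note: buildTallySlice above weights each candidate's stake by its remaining credit. A stdlib restatement using the credit constants declared in snapshot.go later in this diff (full credit 1000, floor 300):

package main

import "fmt"

func main() {
	const defaultFullCredit, minCalSignerQueueCredit = uint64(1000), uint64(300)
	stake, punished := uint64(50), uint64(200)

	creditWeight := defaultFullCredit - punished // 800
	if punished > defaultFullCredit-minCalSignerQueueCredit {
		creditWeight = minCalSignerQueueCredit // heavily punished signers are floored at 300
	}
	fmt.Println(stake * creditWeight) // 40000, the value the tally slice is sorted by
}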
+ // choose 1 from last + for i, tallyItem := range tallySlice[defaultOfficialThirdLevelCount:] { + signerLastLevelSlice = append(signerLastLevelSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]}) + } + sort.Sort(SignerSlice(signerLastLevelSlice)) + signerSlice = append(signerSlice, signerLastLevelSlice[0]) + + } else { + for i, tallyItem := range tallySlice[:queueLength] { + signerSlice = append(signerSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]}) + } + + } + + } else { + for i, signer := range s.Signers { + signerSlice = append(signerSlice, SignerItem{*signer, s.HistoryHash[len(s.HistoryHash)-1-i]}) + } + } + + sort.Sort(SignerSlice(signerSlice)) + // Set the top candidates in random order base on block hash + if len(signerSlice) == 0 { + return nil, errSignerQueueEmpty + } + for i := 0; i < int(s.config.MaxSignerCount); i++ { + topStakeAddress = append(topStakeAddress, signerSlice[i%len(signerSlice)].addr) + } + + return topStakeAddress, nil + +} diff --git a/consensus/consensus/dpos/snapshot.go b/consensus/consensus/dpos/snapshot.go new file mode 100644 index 00000000..b63fa8e4 --- /dev/null +++ b/consensus/consensus/dpos/snapshot.go @@ -0,0 +1,624 @@ +package dpos + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "sort" + "time" + + lru "github.com/hashicorp/golang-lru" + "github.com/vapor/common" + "github.com/vapor/config" + "github.com/vapor/consensus" + "github.com/vapor/crypto" + "github.com/vapor/crypto/ed25519/chainkd" + "github.com/vapor/protocol" + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" +) + +const ( + defaultFullCredit = 1000 // no punished + missingPublishCredit = 100 // punished for missing one block seal + signRewardCredit = 10 // seal one block + autoRewardCredit = 1 // credit auto recover for each block + minCalSignerQueueCredit = 300 // when calculate the signerQueue + defaultOfficialMaxSignerCount = 21 // official max signer count + defaultOfficialFirstLevelCount = 10 // official first level , 100% in signer queue + defaultOfficialSecondLevelCount = 20 // official second level, 60% in signer queue + defaultOfficialThirdLevelCount = 30 // official third level, 40% in signer queue + // the credit of one signer is at least minCalSignerQueueCredit + candidateStateNormal = 1 + candidateMaxLen = 500 // if candidateNeedPD is false and candidate is more than candidateMaxLen, then minimum tickets candidates will be remove in each LCRS*loop +) + +var errIncorrectTallyCount = errors.New("incorrect tally count") + +type Snapshot struct { + config *config.DposConfig // Consensus engine configuration parameters + sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover + LCRS uint64 // Loop count to recreate signers from top tally + Period uint64 `json:"period"` // Period of seal each block + Number uint64 `json:"number"` // Block Number where the snapshot was created + ConfirmedNumber uint64 `json:"confirmed_number"` // Block Number confirmed when the snapshot was created + Hash bc.Hash `json:"hash"` // Block hash where the snapshot was created + HistoryHash []bc.Hash `json:"historyHash"` // Block hash list for two recent loop + Signers []*string `json:"signers"` // Signers queue in current header + Votes map[string]*Vote `json:"votes"` // All validate votes from genesis block + Tally map[string]uint64 `json:"tally"` // Stake for each candidate address + Voters map[string]uint64 `json:"voters"` // Block height for each voter address + Candidates map[string]uint64 
`json:"candidates"` // Candidates for Signers (0- adding procedure 1- normal 2- removing procedure) + Punished map[string]uint64 `json:"punished"` // The signer be punished count cause of missing seal + Confirmations map[uint64][]string `json:"confirms"` // The signer confirm given block height + Proposals map[bc.Hash]*Proposal `json:"proposals"` // The Proposals going or success (failed proposal will be removed) + HeaderTime uint64 `json:"headerTime"` // Time of the current header + LoopStartTime uint64 `json:"loopStartTime"` // Start Time of the current loop +} + +// newSnapshot creates a new snapshot with the specified startup parameters. only ever use if for +// the genesis block. +func newSnapshot(config *config.DposConfig, sigcache *lru.ARCCache, hash bc.Hash, votes []*Vote, lcrs uint64) *Snapshot { + + snap := &Snapshot{ + config: config, + sigcache: sigcache, + LCRS: lcrs, + Period: config.Period, + Number: 0, + ConfirmedNumber: 0, + Hash: hash, + HistoryHash: []bc.Hash{}, + Signers: []*string{}, + Votes: make(map[string]*Vote), + Tally: make(map[string]uint64), + Voters: make(map[string]uint64), + Punished: make(map[string]uint64), + Candidates: make(map[string]uint64), + Confirmations: make(map[uint64][]string), + Proposals: make(map[bc.Hash]*Proposal), + HeaderTime: uint64(time.Now().Unix()) - 1, + LoopStartTime: config.GenesisTimestamp, + } + snap.HistoryHash = append(snap.HistoryHash, hash) + for _, vote := range votes { + // init Votes from each vote + snap.Votes[vote.Voter] = vote + // init Tally + _, ok := snap.Tally[vote.Candidate] + if !ok { + snap.Tally[vote.Candidate] = 0 + } + fmt.Println("newSnapshot", vote.Candidate, vote.Stake) + fmt.Println(snap.Tally) + snap.Tally[vote.Candidate] += vote.Stake + // init Voters + snap.Voters[vote.Voter] = 0 // block height is 0 , vote in genesis block + // init Candidates + snap.Candidates[vote.Voter] = candidateStateNormal + } + + for i := 0; i < int(config.MaxSignerCount); i++ { + snap.Signers = append(snap.Signers, &config.SelfVoteSigners[i%len(config.SelfVoteSigners)]) + } + + return snap +} + +// loadSnapshot loads an existing snapshot from the database. +func loadSnapshot(config *config.DposConfig, sigcache *lru.ARCCache, store protocol.Store, hash bc.Hash) (*Snapshot, error) { + data, err := store.Get(&hash) + if err != nil { + return nil, err + } + snap := new(Snapshot) + if err := json.Unmarshal(data, snap); err != nil { + return nil, err + } + snap.config = config + snap.sigcache = sigcache + return snap, nil +} + +// store inserts the snapshot into the database. +func (s *Snapshot) store(store protocol.Store) error { + data, err := json.Marshal(s) + if err != nil { + return err + } + return store.Set(&s.Hash, data) +} + +// copy creates a deep copy of the snapshot, though not the individual votes. 
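Editor's note: store and loadSnapshot above are a plain JSON round trip keyed by the block hash. Below is a minimal stdlib sketch with a map standing in for protocol.Store; the real loadSnapshot additionally re-attaches config and sigcache after unmarshalling, since neither is serialized.

package main

import (
	"encoding/json"
	"fmt"
)

type memStore map[string][]byte

func (s memStore) Set(key string, data []byte) { s[key] = data }
func (s memStore) Get(key string) []byte       { return s[key] }

type miniSnapshot struct {
	Number uint64            `json:"number"`
	Tally  map[string]uint64 `json:"tally"`
}

func main() {
	db := memStore{}
	snap := miniSnapshot{Number: 360, Tally: map[string]uint64{"vsm1...a": 40000}}

	data, _ := json.Marshal(snap) // store()
	db.Set("hash-of-block-360", data)

	loaded := miniSnapshot{} // loadSnapshot()
	if err := json.Unmarshal(db.Get("hash-of-block-360"), &loaded); err != nil {
		panic(err)
	}
	fmt.Println(loaded.Number, loaded.Tally) // 360 map[vsm1...a:40000]
}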
+func (s *Snapshot) copy() *Snapshot { + cpy := &Snapshot{ + config: s.config, + sigcache: s.sigcache, + LCRS: s.LCRS, + Period: s.Period, + Number: s.Number, + ConfirmedNumber: s.ConfirmedNumber, + Hash: s.Hash, + HistoryHash: make([]bc.Hash, len(s.HistoryHash)), + + Signers: make([]*string, len(s.Signers)), + Votes: make(map[string]*Vote), + Tally: make(map[string]uint64), + Voters: make(map[string]uint64), + Candidates: make(map[string]uint64), + Punished: make(map[string]uint64), + Proposals: make(map[bc.Hash]*Proposal), + Confirmations: make(map[uint64][]string), + + HeaderTime: s.HeaderTime, + LoopStartTime: s.LoopStartTime, + } + copy(cpy.HistoryHash, s.HistoryHash) + copy(cpy.Signers, s.Signers) + for voter, vote := range s.Votes { + cpy.Votes[voter] = &Vote{ + Voter: vote.Voter, + Candidate: vote.Candidate, + Stake: vote.Stake, + } + } + for candidate, tally := range s.Tally { + cpy.Tally[candidate] = tally + } + for voter, number := range s.Voters { + cpy.Voters[voter] = number + } + for candidate, state := range s.Candidates { + cpy.Candidates[candidate] = state + } + for signer, cnt := range s.Punished { + cpy.Punished[signer] = cnt + } + for blockNumber, confirmers := range s.Confirmations { + cpy.Confirmations[blockNumber] = make([]string, len(confirmers)) + copy(cpy.Confirmations[blockNumber], confirmers) + } + for txHash, proposal := range s.Proposals { + cpy.Proposals[txHash] = proposal.copy() + } + + return cpy +} + +// apply creates a new authorization snapshot by applying the given headers to +// the original one. +func (s *Snapshot) apply(headers []*types.BlockHeader) (*Snapshot, error) { + // Allow passing in no headers for cleaner code + if len(headers) == 0 { + return s, nil + } + // Sanity check that the headers can be applied + for i := 0; i < len(headers)-1; i++ { + if headers[i+1].Height != headers[i].Height+1 { + return nil, errInvalidVotingChain + } + } + if headers[0].Height != s.Number+1 { + return nil, errInvalidVotingChain + } + // Iterate through the headers and create a new snapshot + snap := s.copy() + for _, header := range headers { + + // Resolve the authorization key and check against signers + coinbase, err := ecrecover(header, s.sigcache, nil) + if err != nil { + return nil, err + } + + xpub := &chainkd.XPub{} + xpub.UnmarshalText(header.Coinbase) + derivedPK := xpub.PublicKey() + pubHash := crypto.Ripemd160(derivedPK) + address, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams) + if err != nil { + return nil, err + } + if coinbase != address.EncodeAddress() { + return nil, errUnauthorized + } + + headerExtra := HeaderExtra{} + if err := json.Unmarshal(header.Extra[extraVanity:len(header.Extra)-extraSeal], &headerExtra); err != nil { + return nil, err + } + + snap.HeaderTime = header.Timestamp + snap.LoopStartTime = headerExtra.LoopStartTime + snap.Signers = nil + for i := range headerExtra.SignerQueue { + snap.Signers = append(snap.Signers, &headerExtra.SignerQueue[i]) + } + + snap.ConfirmedNumber = headerExtra.ConfirmedBlockNumber + + if len(snap.HistoryHash) >= int(s.config.MaxSignerCount)*2 { + snap.HistoryHash = snap.HistoryHash[1 : int(s.config.MaxSignerCount)*2] + } + + snap.HistoryHash = append(snap.HistoryHash, header.Hash()) + + // deal the new confirmation in this block + snap.updateSnapshotByConfirmations(headerExtra.CurrentBlockConfirmations) + + // deal the new vote from voter + snap.updateSnapshotByVotes(headerExtra.CurrentBlockVotes, header.Height) + + // deal the voter which balance modified + 
//snap.updateSnapshotByMPVotes(headerExtra.ModifyPredecessorVotes) + + // deal the snap related with punished + //snap.updateSnapshotForPunish(headerExtra.SignerMissing, header.Height, header.Coinbase) + + // deal proposals + snap.updateSnapshotByProposals(headerExtra.CurrentBlockProposals, header.Height) + + // deal declares + snap.updateSnapshotByDeclares(headerExtra.CurrentBlockDeclares, header.Height) + + // calculate proposal result + snap.calculateProposalResult(header.Height) + + // check the len of candidate if not candidateNeedPD + if !candidateNeedPD && (snap.Number+1)%(snap.config.MaxSignerCount*snap.LCRS) == 0 && len(snap.Candidates) > candidateMaxLen { + snap.removeExtraCandidate() + } + + } + snap.Number += uint64(len(headers)) + snap.Hash = headers[len(headers)-1].Hash() + fmt.Println("updateSnapshotForExpired before", snap.Tally) + snap.updateSnapshotForExpired() + fmt.Println("updateSnapshotForExpired after", snap.Tally) + err := snap.verifyTallyCnt() + if err != nil { + return nil, err + } + return snap, nil +} + +func (s *Snapshot) removeExtraCandidate() { + // remove minimum tickets tally beyond candidateMaxLen + fmt.Println("removeExtraCandidate") + tallySlice := s.buildTallySlice() + sort.Sort(TallySlice(tallySlice)) + if len(tallySlice) > candidateMaxLen { + removeNeedTally := tallySlice[candidateMaxLen:] + for _, tallySlice := range removeNeedTally { + delete(s.Candidates, tallySlice.addr) + } + } +} + +func (s *Snapshot) verifyTallyCnt() error { + + tallyTarget := make(map[string]uint64) + for _, v := range s.Votes { + if _, ok := tallyTarget[v.Candidate]; ok { + tallyTarget[v.Candidate] = tallyTarget[v.Candidate] + v.Stake + } else { + tallyTarget[v.Candidate] = v.Stake + } + } + for address, tally := range s.Tally { + if targetTally, ok := tallyTarget[address]; ok && targetTally == tally { + continue + } else { + fmt.Println(address, "not find in votes") + } + } + + return nil +} + +func (s *Snapshot) updateSnapshotByDeclares(declares []Declare, headerHeight uint64) { + for _, declare := range declares { + if proposal, ok := s.Proposals[declare.ProposalHash]; ok { + // check the proposal enable status and valid block number + if proposal.ReceivedNumber+proposal.ValidationLoopCnt*s.config.MaxSignerCount < headerHeight || !s.isCandidate(declare.Declarer) { + continue + } + // check if this signer already declare on this proposal + alreadyDeclare := false + for _, v := range proposal.Declares { + if v.Declarer == declare.Declarer { + // this declarer already declare for this proposal + alreadyDeclare = true + break + } + } + if alreadyDeclare { + continue + } + // add declare to proposal + s.Proposals[declare.ProposalHash].Declares = append(s.Proposals[declare.ProposalHash].Declares, + &Declare{declare.ProposalHash, declare.Declarer, declare.Decision}) + + } + } +} + +func (s *Snapshot) calculateProposalResult(headerHeight uint64) { + + for hashKey, proposal := range s.Proposals { + // the result will be calculate at receiverdNumber + vlcnt + 1 + if proposal.ReceivedNumber+proposal.ValidationLoopCnt*s.config.MaxSignerCount+1 == headerHeight { + // calculate the current stake of this proposal + judegmentStake := big.NewInt(0) + for _, tally := range s.Tally { + judegmentStake.Add(judegmentStake, new(big.Int).SetUint64(tally)) + } + judegmentStake.Mul(judegmentStake, big.NewInt(2)) + judegmentStake.Div(judegmentStake, big.NewInt(3)) + // calculate declare stake + yesDeclareStake := big.NewInt(0) + for _, declare := range proposal.Declares { + if declare.Decision { + 
if _, ok := s.Tally[declare.Declarer]; ok { + yesDeclareStake.Add(yesDeclareStake, new(big.Int).SetUint64(s.Tally[declare.Declarer])) + } + } + } + if yesDeclareStake.Cmp(judegmentStake) > 0 { + // process add candidate + switch proposal.ProposalType { + case proposalTypeCandidateAdd: + if candidateNeedPD { + s.Candidates[s.Proposals[hashKey].Candidate] = candidateStateNormal + } + case proposalTypeCandidateRemove: + if _, ok := s.Candidates[proposal.Candidate]; ok && candidateNeedPD { + delete(s.Candidates, proposal.Candidate) + } + case proposalTypeMinerRewardDistributionModify: + minerRewardPerThousand = s.Proposals[hashKey].MinerRewardPerThousand + + } + } + + } + + } + +} + +func (s *Snapshot) updateSnapshotByProposals(proposals []Proposal, headerHeight uint64) { + for _, proposal := range proposals { + proposal.ReceivedNumber = headerHeight + s.Proposals[proposal.Hash] = &proposal + } +} + +func (s *Snapshot) updateSnapshotForExpired() { + + // deal the expired vote + var expiredVotes []*Vote + for voterAddress, voteNumber := range s.Voters { + if s.Number-voteNumber > s.config.Epoch { + // clear the vote + if expiredVote, ok := s.Votes[voterAddress]; ok { + expiredVotes = append(expiredVotes, expiredVote) + } + } + } + // remove expiredVotes only enough voters left + if uint64(len(s.Voters)-len(expiredVotes)) >= s.config.MaxSignerCount { + for _, expiredVote := range expiredVotes { + s.Tally[expiredVote.Candidate] -= expiredVote.Stake + // TODO + /* + if s.Tally[expiredVote.Candidate] == 0 { + delete(s.Tally, expiredVote.Candidate) + } + delete(s.Votes, expiredVote.Voter) + delete(s.Voters, expiredVote.Voter) + */ + } + } + + // deal the expired confirmation + for blockNumber := range s.Confirmations { + if s.Number-blockNumber > s.config.MaxSignerCount { + delete(s.Confirmations, blockNumber) + } + } + + // TODO + /* + // remove 0 stake tally + for address, tally := range s.Tally { + if tally <= 0 { + delete(s.Tally, address) + } + } + */ +} + +func (s *Snapshot) updateSnapshotByConfirmations(confirmations []Confirmation) { + for _, confirmation := range confirmations { + _, ok := s.Confirmations[confirmation.BlockNumber] + if !ok { + s.Confirmations[confirmation.BlockNumber] = []string{} + } + addConfirmation := true + for _, address := range s.Confirmations[confirmation.BlockNumber] { + if confirmation.Signer == address { + addConfirmation = false + break + } + } + if addConfirmation == true { + s.Confirmations[confirmation.BlockNumber] = append(s.Confirmations[confirmation.BlockNumber], confirmation.Signer) + } + } +} + +func (s *Snapshot) updateSnapshotByVotes(votes []Vote, headerHeight uint64) { + fmt.Println("updateSnapshotByVotes start") + for _, vote := range votes { + // update Votes, Tally, Voters data + if lastVote, ok := s.Votes[vote.Voter]; ok { + fmt.Println("lastVote.Candidate:", lastVote.Candidate) + fmt.Println("lastVote.Stake:", lastVote.Stake) + fmt.Println(s.Tally[lastVote.Candidate]-lastVote.Stake, s.Tally[lastVote.Candidate]) + s.Tally[lastVote.Candidate] = s.Tally[lastVote.Candidate] - lastVote.Stake + fmt.Println(s.Tally) + fmt.Println(s.Tally[lastVote.Candidate]) + } + if _, ok := s.Tally[vote.Candidate]; ok { + fmt.Println("vote.Candidate:", vote.Candidate) + fmt.Println("vote.Stake:", vote.Stake) + s.Tally[vote.Candidate] = s.Tally[vote.Candidate] + vote.Stake + } else { + fmt.Println("111 vote.Candidate:", vote.Candidate) + fmt.Println("111 vote.Stake:", vote.Stake) + s.Tally[vote.Candidate] = vote.Stake + if !candidateNeedPD { + 
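Editor's note: calculateProposalResult above passes a proposal only when the yes-declaring stake exceeds two thirds of the total tally. The same comparison with concrete numbers, stdlib big.Int only:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	tally := map[string]uint64{"A": 600, "B": 300, "C": 100} // total stake 1000
	yesDeclarers := []string{"A", "B"}

	threshold := big.NewInt(0)
	for _, stake := range tally {
		threshold.Add(threshold, new(big.Int).SetUint64(stake))
	}
	threshold.Mul(threshold, big.NewInt(2))
	threshold.Div(threshold, big.NewInt(3)) // 666

	yesStake := big.NewInt(0)
	for _, declarer := range yesDeclarers {
		yesStake.Add(yesStake, new(big.Int).SetUint64(tally[declarer]))
	}
	fmt.Println(yesStake.Cmp(threshold) > 0) // true: 900 > 666, the proposal passes
}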
s.Candidates[vote.Candidate] = candidateStateNormal + } + } + s.Votes[vote.Voter] = &Vote{vote.Voter, vote.Candidate, vote.Stake} + s.Voters[vote.Voter] = headerHeight + } + fmt.Println(votes) + fmt.Println(s.Tally) + fmt.Println("updateSnapshotByVotes end") +} + +func (s *Snapshot) updateSnapshotByMPVotes(votes []Vote) { + fmt.Println("8888888888888888888888888888888888") + fmt.Println(s.Tally) + for _, txVote := range votes { + fmt.Println("updateSnapshotByMPVotesupdateSnapshotByMPVotesupdateSnapshotByMPVotes") + if lastVote, ok := s.Votes[txVote.Voter]; ok { + fmt.Println("txVote.Voter:", txVote.Voter) + fmt.Println("lastVote.Candidate:", lastVote.Candidate, ",lastVote.Stake:", lastVote.Stake) + fmt.Println("txVote.Stake:", txVote.Stake) + s.Tally[lastVote.Candidate] = s.Tally[lastVote.Candidate] - lastVote.Stake + s.Tally[lastVote.Candidate] = s.Tally[lastVote.Candidate] + txVote.Stake + s.Votes[txVote.Voter] = &Vote{Voter: txVote.Voter, Candidate: lastVote.Candidate, Stake: txVote.Stake} + fmt.Println(txVote.Voter, lastVote.Candidate, txVote.Stake) + // do not modify header number of snap.Voters + } + } + fmt.Println(s.Tally) + fmt.Println("999999999999999999999999999999999") +} + +func (s *Snapshot) updateSnapshotForPunish(signerMissing []string, headerNumber uint64, coinbase string) { + // set punished count to half of origin in Epoch + /* + if headerNumber.Uint64()%s.config.Epoch == 0 { + for bePublished := range s.Punished { + if count := s.Punished[bePublished] / 2; count > 0 { + s.Punished[bePublished] = count + } else { + delete(s.Punished, bePublished) + } + } + } + */ + // punish the missing signer + for _, signerMissing := range signerMissing { + if _, ok := s.Punished[signerMissing]; ok { + s.Punished[signerMissing] += missingPublishCredit + } else { + s.Punished[signerMissing] = missingPublishCredit + } + } + // reduce the punish of sign signer + if _, ok := s.Punished[coinbase]; ok { + + if s.Punished[coinbase] > signRewardCredit { + s.Punished[coinbase] -= signRewardCredit + } else { + delete(s.Punished, coinbase) + } + } + // reduce the punish for all punished + for signerEach := range s.Punished { + if s.Punished[signerEach] > autoRewardCredit { + s.Punished[signerEach] -= autoRewardCredit + } else { + delete(s.Punished, signerEach) + } + } +} + +// inturn returns if a signer at a given block height is in-turn or not. 
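Editor's note: updateSnapshotForPunish above (currently left commented out in apply) keeps a per-signer credit ledger: +100 for a missed slot, -10 for sealing a block, and -1 automatic recovery each block, deleting entries that fall to zero. A stdlib walk-through:

package main

import "fmt"

func main() {
	const missingPublishCredit, signRewardCredit, autoRewardCredit = uint64(100), uint64(10), uint64(1)
	punished := map[string]uint64{"B": 5}

	punished["B"] += missingPublishCredit // B missed its slot again: 105

	if p, ok := punished["A"]; ok { // A sealed this block
		if p > signRewardCredit {
			punished["A"] -= signRewardCredit
		} else {
			delete(punished, "A")
		}
	}

	for signer := range punished { // everyone recovers 1 credit per block
		if punished[signer] > autoRewardCredit {
			punished[signer] -= autoRewardCredit
		} else {
			delete(punished, signer)
		}
	}
	fmt.Println(punished) // map[B:104]
}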
+func (s *Snapshot) inturn(signer string, headerTime uint64) bool { + for _, addr := range s.Signers { + fmt.Println("inturn [addr]:", *addr) + } + fmt.Println("signer:", signer) + // if all node stop more than period of one loop + loopIndex := int((headerTime-s.LoopStartTime)/s.config.Period) % len(s.Signers) + fmt.Println(headerTime-s.LoopStartTime, s.config.Period, len(s.Signers), loopIndex) + if loopIndex >= len(s.Signers) { + return false + } else if *s.Signers[loopIndex] != signer { + return false + + } + return true +} + +// check if address belong to voter +func (s *Snapshot) isVoter(address string) bool { + if _, ok := s.Voters[address]; ok { + return true + } + return false +} + +// check if address belong to candidate +func (s *Snapshot) isCandidate(address string) bool { + if _, ok := s.Candidates[address]; ok { + return true + } + return false +} + +// get last block number meet the confirm condition +func (s *Snapshot) getLastConfirmedBlockNumber(confirmations []Confirmation) *big.Int { + + cpyConfirmations := make(map[uint64][]string) + for blockNumber, confirmers := range s.Confirmations { + cpyConfirmations[blockNumber] = make([]string, len(confirmers)) + copy(cpyConfirmations[blockNumber], confirmers) + } + // update confirmation into snapshot + for _, confirmation := range confirmations { + _, ok := cpyConfirmations[confirmation.BlockNumber] + if !ok { + cpyConfirmations[confirmation.BlockNumber] = []string{} + } + addConfirmation := true + for _, address := range cpyConfirmations[confirmation.BlockNumber] { + if confirmation.Signer == address { + addConfirmation = false + break + } + } + if addConfirmation == true { + + cpyConfirmations[confirmation.BlockNumber] = append(cpyConfirmations[confirmation.BlockNumber], confirmation.Signer) + } + } + + i := s.Number + for ; i > s.Number-s.config.MaxSignerCount*2/3+1; i-- { + if confirmers, ok := cpyConfirmations[i]; ok { + if len(confirmers) > int(s.config.MaxSignerCount*2/3) { + return big.NewInt(int64(i)) + } + } + } + return big.NewInt(int64(i)) +} diff --git a/mining/consensus/errors.go b/consensus/consensus/errors.go similarity index 100% rename from mining/consensus/errors.go rename to consensus/consensus/errors.go diff --git a/database/leveldb/store.go b/database/leveldb/store.go index 9a12e6cd..3e5d4cb6 100644 --- a/database/leveldb/store.go +++ b/database/leveldb/store.go @@ -26,6 +26,7 @@ var ( blockHeaderPrefix = []byte("BH:") txStatusPrefix = []byte("BTS:") cliamTxPreFix = []byte("Claim:") + dposPreFix = []byte("dpos:") ) func loadBlockStoreStateJSON(db dbm.DB) *protocol.BlockStoreState { @@ -67,6 +68,10 @@ func calcClaimTxKey(hash *bc.Hash) []byte { return append(cliamTxPreFix, hash.Bytes()...) } +func calcDPosKey(hash *bc.Hash) []byte { + return append(dposPreFix, hash.Bytes()...) 
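Editor's note: getLastConfirmedBlockNumber above treats a height as confirmed once more than two thirds of the MaxSignerCount slots have confirmed it; with the default 21 signers that means at least 15 confirmations. A stdlib check with made-up counts:

package main

import "fmt"

func main() {
	const maxSignerCount = uint64(21)
	confirmations := map[uint64]int{359: 3, 358: 15, 357: 20} // confirmer counts per height

	for height := uint64(359); height > 356; height-- {
		if confirmations[height] > int(maxSignerCount*2/3) { // need more than 14
			fmt.Println("last confirmed height:", height) // 358
			break
		}
	}
}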
+} + // GetBlock return the block by given hash func GetBlock(db dbm.DB, hash *bc.Hash) *types.Block { bytez := db.Get(calcBlockKey(hash)) @@ -237,3 +242,18 @@ func (s *Store) SetWithdrawSpent(hash *bc.Hash) { batch.Set(calcClaimTxKey(hash), []byte("1")) batch.Write() } + +func (s *Store) Set(hash *bc.Hash, data []byte) error { + batch := s.db.NewBatch() + batch.Set(calcDPosKey(hash), data) + batch.Write() + return nil +} + +func (s *Store) Get(hash *bc.Hash) ([]byte, error) { + data := s.db.Get(calcDPosKey(hash)) + if data == nil { + return nil, errors.New("can't find the snapshot by given hash") + } + return data, nil +} diff --git a/database/leveldb/utxo_view.go b/database/leveldb/utxo_view.go index 82e05a4e..2bb9473d 100644 --- a/database/leveldb/utxo_view.go +++ b/database/leveldb/utxo_view.go @@ -26,7 +26,6 @@ func getTransactionsUtxo(db dbm.DB, view *state.UtxoViewpoint, txs []*bc.Tx) err if data == nil { continue } - var utxo storage.UtxoEntry if err := proto.Unmarshal(data, &utxo); err != nil { return errors.Wrap(err, "unmarshaling utxo entry") diff --git a/mining/consensus/consensus.go b/mining/consensus/consensus.go deleted file mode 100644 index cdabdf9c..00000000 --- a/mining/consensus/consensus.go +++ /dev/null @@ -1,82 +0,0 @@ -package consensus - -import ( - "math/big" - - "github.com/vapor/common" - "github.com/vapor/protocol/bc" - "github.com/vapor/protocol/bc/types" -) - -// ChainReader defines a small collection of methods needed to access the local -// blockchain during header and/or uncle verification. -type ChainReader interface { - // Config retrieves the blockchain's chain configuration. - //Config() *params.ChainConfig - - // CurrentHeader retrieves the current header from the local chain. - CurrentHeader() *types.BlockHeader - - // GetHeader retrieves a block header from the database by hash and number. - GetHeader(hash bc.Hash, number uint64) *types.BlockHeader - - // GetHeaderByNumber retrieves a block header from the database by number. - GetHeaderByNumber(number uint64) *types.BlockHeader - - // GetHeaderByHash retrieves a block header from the database by its hash. - GetHeaderByHash(hash bc.Hash) *types.BlockHeader - - // GetBlock retrieves a block from the database by hash and number. - GetBlock(hash bc.Hash, number uint64) *types.Block -} - -// Engine is an algorithm agnostic consensus engine. -type Engine interface { - // Author retrieves the Ethereum address of the account that minted the given - // block, which may be different from the header's coinbase if a consensus - // engine is based on signatures. - Author(header *types.BlockHeader) (common.Address, error) - - // VerifyHeader checks whether a header conforms to the consensus rules of a - // given engine. Verifying the seal may be done optionally here, or explicitly - // via the VerifySeal method. - VerifyHeader(chain ChainReader, header *types.BlockHeader, seal bool) error - - // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers - // concurrently. The method returns a quit channel to abort the operations and - // a results channel to retrieve the async verifications (the order is that of - // the input slice). - VerifyHeaders(chain ChainReader, headers []*types.BlockHeader, seals []bool) (chan<- struct{}, <-chan error) - - // VerifyUncles verifies that the given block's uncles conform to the consensus - // rules of a given engine. 
- VerifyUncles(chain ChainReader, block *types.Block) error - - // VerifySeal checks whether the crypto seal on a header is valid according to - // the consensus rules of the given engine. - VerifySeal(chain ChainReader, header *types.BlockHeader) error - - // Prepare initializes the consensus fields of a block header according to the - // rules of a particular engine. The changes are executed inline. - Prepare(chain ChainReader, header *types.BlockHeader) error - - // Finalize runs any post-transaction state modifications (e.g. block rewards) - // and assembles the final block. - // Note: The block header and state database might be updated to reflect any - // consensus rules that happen at finalization (e.g. block rewards). - /* - Finalize(chain ChainReader, header *types.BlockHeader , state *state.StateDB, txs []*types.Transaction, - uncles []*types.BlockHeader , receipts []*types.Receipt) (*types.Block, error) - */ - - // Seal generates a new block for the given input block with the local miner's - // seal place on top. - Seal(chain ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) - - // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty - // that a new block should have. - CalcDifficulty(chain ChainReader, time uint64, parent *types.BlockHeader) *big.Int - - // APIs returns the RPC APIs this consensus engine provides. - //APIs(chain ChainReader) []rpc.API -} diff --git a/mining/consensus/dpos/custom_tx.go b/mining/consensus/dpos/custom_tx.go deleted file mode 100644 index c0e7581b..00000000 --- a/mining/consensus/dpos/custom_tx.go +++ /dev/null @@ -1 +0,0 @@ -package dpos diff --git a/mining/consensus/dpos/dpos.go b/mining/consensus/dpos/dpos.go deleted file mode 100644 index 7ab09d98..00000000 --- a/mining/consensus/dpos/dpos.go +++ /dev/null @@ -1,198 +0,0 @@ -package dpos - -import ( - "errors" - "fmt" - "math/big" - "sync" - "time" - - "github.com/go-loom/types" - lru "github.com/hashicorp/golang-lru" - log "github.com/sirupsen/logrus" - "github.com/vapor/common" - "github.com/vapor/config" - "github.com/vapor/protocol" - "github.com/vapor/protocol/bc" -) - -const ( - inMemorySnapshots = 128 // Number of recent vote snapshots to keep in memory - inMemorySignatures = 4096 // Number of recent block signatures to keep in memory - secondsPerYear = 365 * 24 * 3600 // Number of seconds for one year - checkpointInterval = 360 // About N hours if config.period is N - module = "dpos" -) - -//delegated-proof-of-stake protocol constants. 
-var ( - SignerBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block first year - defaultEpochLength = uint64(3000000) // Default number of blocks after which vote's period of validity - defaultBlockPeriod = uint64(3) // Default minimum difference between two consecutive block's timestamps - defaultMaxSignerCount = uint64(21) // - defaultMinVoterBalance = new(big.Int).Mul(big.NewInt(10000), big.NewInt(1e+18)) - extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity - extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal - defaultDifficulty = big.NewInt(1) // Default difficulty - defaultLoopCntRecalculateSigners = uint64(10) // Default loop count to recreate signers from top tally - minerRewardPerThousand = uint64(618) // Default reward for miner in each block from block reward (618/1000) - candidateNeedPD = false // is new candidate need Proposal & Declare process -) - -var ( - // errUnknownBlock is returned when the list of signers is requested for a block - // that is not part of the local blockchain. - errUnknownBlock = errors.New("unknown block") - - // errMissingVanity is returned if a block's extra-data section is shorter than - // 32 bytes, which is required to store the signer vanity. - errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing") - - // errMissingSignature is returned if a block's extra-data section doesn't seem - // to contain a 65 byte secp256k1 signature. - errMissingSignature = errors.New("extra-data 65 byte suffix signature missing") - - // errInvalidMixDigest is returned if a block's mix digest is non-zero. - errInvalidMixDigest = errors.New("non-zero mix digest") - - // errInvalidUncleHash is returned if a block contains an non-empty uncle list. - errInvalidUncleHash = errors.New("non empty uncle hash") - - // ErrInvalidTimestamp is returned if the timestamp of a block is lower than - // the previous block's timestamp + the minimum block period. - ErrInvalidTimestamp = errors.New("invalid timestamp") - - // errInvalidVotingChain is returned if an authorization list is attempted to - // be modified via out-of-range or non-contiguous headers. - errInvalidVotingChain = errors.New("invalid voting chain") - - // errUnauthorized is returned if a header is signed by a non-authorized entity. - errUnauthorized = errors.New("unauthorized") - - // errPunishedMissing is returned if a header calculate punished signer is wrong. - errPunishedMissing = errors.New("punished signer missing") - - // errWaitTransactions is returned if an empty block is attempted to be sealed - // on an instant chain (0 second period). It's important to refuse these as the - // block reward is zero, so an empty block just bloats the chain... fast. 
- errWaitTransactions = errors.New("waiting for transactions") - - // errUnclesNotAllowed is returned if uncles exists - errUnclesNotAllowed = errors.New("uncles not allowed") - - // errCreateSignerQueueNotAllowed is returned if called in (block number + 1) % maxSignerCount != 0 - errCreateSignerQueueNotAllowed = errors.New("create signer queue not allowed") - - // errInvalidSignerQueue is returned if verify SignerQueue fail - errInvalidSignerQueue = errors.New("invalid signer queue") - - // errSignerQueueEmpty is returned if no signer when calculate - errSignerQueueEmpty = errors.New("signer queue is empty") -) - -type Dpos struct { - config *config.DposConfig // Consensus engine configuration parameters - store protocol.Store // Database to store and retrieve snapshot checkpoints - recents *lru.ARCCache // Snapshots for recent block to speed up reorgs - signatures *lru.ARCCache // Signatures of recent blocks to speed up mining - signer common.Address // Ethereum address of the signing key - signFn SignerFn // Signer function to authorize hashes with - signTxFn SignTxFn // Sign transaction function to sign tx - lock sync.RWMutex // Protects the signer fields - lcsc uint64 // Last confirmed side chain -} - -// SignerFn is a signer callback function to request a hash to be signed by a backing account. -type SignerFn func(common.Address, []byte) ([]byte, error) - -// SignTxFn is a signTx -type SignTxFn func(common.Address, *types.Transaction, *big.Int) (*types.Transaction, error) - -// -func ecrecover(header *types.BlockHeader, sigcache *lru.ARCCache) (common.Address, error) { - return nil, nil -} - -func sigHash(header *types.BlockHeader) (hash bc.Hash) { - return bc.Hash{} -} - -// -func New(config *config.DposConfig, store protocol.Store) *Dpos { - conf := *config - if conf.Epoch == 0 { - conf.Epoch = defaultEpochLength - } - if conf.Period == 0 { - conf.Period = defaultBlockPeriod - } - if conf.MaxSignerCount == 0 { - conf.MaxSignerCount = defaultMaxSignerCount - } - if conf.MinVoterBalance.Uint64() == 0 { - conf.MinVoterBalance = defaultMinVoterBalance - } - - // Allocate the snapshot caches and create the engine - recents, _ := lru.NewARC(inMemorySnapshots) - signatures, _ := lru.NewARC(inMemorySignatures) - return &Dpos{ - config: &conf, - store: store, - recents: recents, - signatures: signatures, - } -} - -// 从BLockHeader中获取到地址 -func (d *Dpos) Author(header *types.BlockHeader) (common.Address, error) { - return ecrecover(header, d.signatures) -} - -// Prepare implements consensus.Engine, preparing all the consensus fields of the header for running the transactions on top. 
-func (d *Dpos) Prepare(c *protocol.Chain, header *bc.BlockHeader) error { - if d.config.GenesisTimestamp < uint64(time.Now().Unix()) { - return nil - } - - if header.Height == 1 { - for { - delay := time.Unix(int64(d.config.GenesisTimestamp-2), 0).Sub(time.Now()) - if delay <= time.Duration(0) { - log.WithFields(log.Fields{"module": module, "time": time.Now()}).Info("Ready for seal block") - break - } else if delay > time.Duration(d.config.Period)*time.Second { - delay = time.Duration(d.config.Period) * time.Second - } - log.WithFields(log.Fields{"module": module, "delay": time.Duration(time.Unix(int64(d.config.GenesisTimestamp-2), 0).Sub(time.Now()))}).Info("Waiting for seal block") - select { - case <-time.After(delay): - continue - } - } - } - return nil -} - -func (d *Dpos) Finalize(c *protocol.Chain, header *bc.BlockHeader, txs []*types.Transaction) (*bc.Block, error) { - height := c.BestBlockHeight() - fmt.Println(height) - parent, err := c.GetHeaderByHeight(height - 1) - if parent == nil { - return nil, err - } - //header.Timestamp - t := new(big.Int).Add(new(big.Int).SetUint64(parent.Timestamp), new(big.Int).SetUint64(d.config.Period)) - header.Timestamp = t.Uint64() - - if header.Timestamp < uint64(time.Now().Unix()) { - header.Timestamp = uint64(time.Now().Unix()) - } - - - return nil, nil -} - -func (d *Dpos) Seal() { - -} diff --git a/mining/consensus/dpos/signer_queue.go b/mining/consensus/dpos/signer_queue.go deleted file mode 100644 index c0e7581b..00000000 --- a/mining/consensus/dpos/signer_queue.go +++ /dev/null @@ -1 +0,0 @@ -package dpos diff --git a/mining/consensus/dpos/snapshot.go b/mining/consensus/dpos/snapshot.go deleted file mode 100644 index c0e7581b..00000000 --- a/mining/consensus/dpos/snapshot.go +++ /dev/null @@ -1 +0,0 @@ -package dpos diff --git a/mining/miner/miner.go b/mining/miner/miner.go new file mode 100644 index 00000000..451687f4 --- /dev/null +++ b/mining/miner/miner.go @@ -0,0 +1,326 @@ +package miner + +import ( + "errors" + "sync" + "time" + + "github.com/vapor/config" + + log "github.com/sirupsen/logrus" + + "github.com/vapor/account" + "github.com/vapor/common" + "github.com/vapor/consensus" + engine "github.com/vapor/consensus/consensus" + "github.com/vapor/consensus/consensus/dpos" + "github.com/vapor/crypto" + "github.com/vapor/crypto/ed25519/chainkd" + "github.com/vapor/mining" + "github.com/vapor/protocol" + "github.com/vapor/protocol/bc" + "github.com/vapor/protocol/bc/types" + "github.com/vapor/protocol/vm/vmutil" +) + +const ( + maxNonce = ^uint64(0) // 2^64 - 1 + defaultNumWorkers = 1 + hashUpdateSecs = 1 + module = "miner" +) + +var ConsensusEngine engine.Engine + +// Miner creates blocks and searches for proof-of-work values. 
+type Miner struct { + sync.Mutex + chain *protocol.Chain + accountManager *account.Manager + txPool *protocol.TxPool + numWorkers uint64 + started bool + discreteMining bool + workerWg sync.WaitGroup + updateNumWorkers chan struct{} + quit chan struct{} + newBlockCh chan *bc.Hash + Authoritys map[string]string + position uint64 + engine engine.Engine +} + +func NewMiner(c *protocol.Chain, accountManager *account.Manager, txPool *protocol.TxPool, newBlockCh chan *bc.Hash, engine engine.Engine) *Miner { + authoritys := make(map[string]string) + var position uint64 + dpos, ok := engine.(*dpos.Dpos) + if !ok { + log.Error("Only the dpos engine was allowed") + return nil + } + dpos.Authorize(config.CommonConfig.Consensus.Dpos.Coinbase) + /* + for index, xpub := range consensus.ActiveNetParams.SignBlockXPubs { + pubHash := crypto.Ripemd160(xpub.PublicKey()) + address, _ := common.NewPeginAddressWitnessScriptHash(pubHash, &consensus.ActiveNetParams) + control, _ := vmutil.P2WPKHProgram([]byte(pubHash)) + //key := hex.EncodeToString(control) + //authoritys[key] = xpub.String() + authoritys[address.EncodeAddress()] = xpub.String() + if accountManager.IsLocalControlProgram(control) { + position = uint64(index) + dpos.Authorize(address.EncodeAddress()) + } + } + */ + //c.SetAuthoritys(authoritys) + //c.SetPosition(position) + c.SetConsensusEngine(dpos) + ConsensusEngine = dpos + return &Miner{ + chain: c, + accountManager: accountManager, + txPool: txPool, + numWorkers: defaultNumWorkers, + updateNumWorkers: make(chan struct{}), + newBlockCh: newBlockCh, + Authoritys: authoritys, + position: position, + engine: dpos, + } +} + +func (m *Miner) generateProof(block types.Block) (types.Proof, error) { + var xPrv chainkd.XPrv + if consensus.ActiveNetParams.Signer == "" { + return types.Proof{}, errors.New("Signer is empty") + } + xPrv.UnmarshalText([]byte(consensus.ActiveNetParams.Signer)) + sign := xPrv.Sign(block.BlockCommitment.TransactionsMerkleRoot.Bytes()) + pubHash := crypto.Ripemd160(xPrv.XPub().PublicKey()) + + address, _ := common.NewPeginAddressWitnessScriptHash(pubHash, &consensus.ActiveNetParams) + control, err := vmutil.P2WPKHProgram([]byte(pubHash)) + if err != nil { + return types.Proof{}, err + } + return types.Proof{Sign: sign, ControlProgram: control, Address: address.ScriptAddress()}, nil +} + +// generateBlocks is a worker that is controlled by the miningWorkerController. +// It is self contained in that it creates block templates and attempts to solve +// them while detecting when it is performing stale work and reacting +// accordingly by generating a new block template. When a block is solved, it +// is submitted. +// +// It must be run as a goroutine. 
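Editor's note: a hedged wiring sketch (not part of the patch) for the Miner defined above; the channel size and nil-check policy are assumptions. NewMiner itself rejects any engine that is not the dpos engine.

func startDposMiner(c *protocol.Chain, accounts *account.Manager, txPool *protocol.TxPool, eng engine.Engine) (*Miner, chan *bc.Hash) {
	newBlockCh := make(chan *bc.Hash, 1)
	m := NewMiner(c, accounts, txPool, newBlockCh, eng)
	if m == nil {
		return nil, nil // only the dpos engine is accepted
	}
	m.Start()
	return m, newBlockCh
}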
+func (m *Miner) generateBlocks(quit chan struct{}) { + ticker := time.NewTicker(time.Second * hashUpdateSecs) + defer ticker.Stop() + +out: + for { + select { + case <-quit: + break out + default: + } + /* + engine, ok := m.engine.(*dpos.Dpos) + if !ok { + log.Error("Only the dpos engine was allowed") + return + } + + header := m.chain.BestBlockHeader() + isSeal, err := engine.IsSealer(m.chain, header.Hash(), header, uint64(time.Now().Unix())) + if err != nil { + log.WithFields(log.Fields{"module": module, "error": err}).Error("Determine whether seal is wrong") + continue + } + */ + isSeal := true + if isSeal { + block, err := mining.NewBlockTemplate1(m.chain, m.txPool, m.accountManager, m.engine) + if err != nil { + log.Errorf("Mining: failed on create NewBlockTemplate: %v", err) + time.Sleep(3 * time.Second) + continue + } + if block == nil { + time.Sleep(3 * time.Second) + continue + } + block, err = m.engine.Seal(m.chain, block) + if err != nil { + log.Errorf("Seal, %v", err) + continue + } + m.chain.SetConsensusEngine(m.engine) + if isOrphan, err := m.chain.ProcessBlock(block); err == nil { + log.WithFields(log.Fields{ + "height": block.BlockHeader.Height, + "isOrphan": isOrphan, + "tx": len(block.Transactions), + }).Info("Miner processed block") + + blockHash := block.Hash() + m.newBlockCh <- &blockHash + } else { + log.WithField("height", block.BlockHeader.Height).Errorf("Miner fail on ProcessBlock, %v", err) + } + } + time.Sleep(3 * time.Second) + } + + m.workerWg.Done() +} + +// miningWorkerController launches the worker goroutines that are used to +// generate block templates and solve them. It also provides the ability to +// dynamically adjust the number of running worker goroutines. +// +// It must be run as a goroutine. +func (m *Miner) miningWorkerController() { + // launchWorkers groups common code to launch a specified number of + // workers for generating blocks. + var runningWorkers []chan struct{} + launchWorkers := func(numWorkers uint64) { + for i := uint64(0); i < numWorkers; i++ { + quit := make(chan struct{}) + runningWorkers = append(runningWorkers, quit) + + m.workerWg.Add(1) + go m.generateBlocks(quit) + } + } + + // Launch the current number of workers by default. + runningWorkers = make([]chan struct{}, 0, m.numWorkers) + launchWorkers(m.numWorkers) + +out: + for { + select { + // Update the number of running workers. + case <-m.updateNumWorkers: + // No change. + numRunning := uint64(len(runningWorkers)) + if m.numWorkers == numRunning { + continue + } + + // Add new workers. + if m.numWorkers > numRunning { + launchWorkers(m.numWorkers - numRunning) + continue + } + + // Signal the most recently created goroutines to exit. + for i := numRunning - 1; i >= m.numWorkers; i-- { + close(runningWorkers[i]) + runningWorkers[i] = nil + runningWorkers = runningWorkers[:i] + } + + case <-m.quit: + for _, quit := range runningWorkers { + close(quit) + } + break out + } + } + + m.workerWg.Wait() +} + +// Start begins the CPU mining process as well as the speed monitor used to +// track hashing metrics. Calling this function when the CPU miner has +// already been started will have no effect. +// +// This function is safe for concurrent access. 
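Editor's note: one iteration of the mining loop above, condensed into a hedged sketch: build a template, seal it, hand it to the chain. Error handling, pacing, and the IsSealer check are omitted for brevity; all names are ones this diff introduces.

func (m *Miner) sealOnce() error {
	block, err := mining.NewBlockTemplate1(m.chain, m.txPool, m.accountManager, m.engine)
	if err != nil {
		return err
	}
	if block, err = m.engine.Seal(m.chain, block); err != nil {
		return err
	}
	_, err = m.chain.ProcessBlock(block)
	return err
}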
+func (m *Miner) Start() { + m.Lock() + defer m.Unlock() + + // Nothing to do if the miner is already running + if m.started { + return + } + + m.quit = make(chan struct{}) + go m.miningWorkerController() + + m.started = true + log.Infof("CPU miner started") +} + +// Stop gracefully stops the mining process by signalling all workers, and the +// speed monitor to quit. Calling this function when the CPU miner has not +// already been started will have no effect. +// +// This function is safe for concurrent access. +func (m *Miner) Stop() { + m.Lock() + defer m.Unlock() + + // Nothing to do if the miner is not currently running + if !m.started { + return + } + + close(m.quit) + m.started = false + log.Info("CPU miner stopped") +} + +// IsMining returns whether or not the CPU miner has been started and is +// therefore currenting mining. +// +// This function is safe for concurrent access. +func (m *Miner) IsMining() bool { + m.Lock() + defer m.Unlock() + + return m.started +} + +// SetNumWorkers sets the number of workers to create which solve blocks. Any +// negative values will cause a default number of workers to be used which is +// based on the number of processor cores in the system. A value of 0 will +// cause all CPU mining to be stopped. +// +// This function is safe for concurrent access. +func (m *Miner) SetNumWorkers(numWorkers int32) { + if numWorkers == 0 { + m.Stop() + } + + // Don't lock until after the first check since Stop does its own + // locking. + m.Lock() + defer m.Unlock() + + // Use default if provided value is negative. + if numWorkers < 0 { + m.numWorkers = defaultNumWorkers + } else { + m.numWorkers = uint64(numWorkers) + } + + // When the miner is already running, notify the controller about the + // the change. + if m.started { + m.updateNumWorkers <- struct{}{} + } +} + +// NumWorkers returns the number of workers which are running to solve blocks. +// +// This function is safe for concurrent access. +func (m *Miner) NumWorkers() int32 { + m.Lock() + defer m.Unlock() + + return int32(m.numWorkers) +} diff --git a/mining/mining.go b/mining/mining.go index e816bc46..e2dfcac5 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -5,11 +5,16 @@ import ( "strconv" "time" + "github.com/vapor/common" + log "github.com/sirupsen/logrus" "github.com/vapor/account" "github.com/vapor/blockchain/txbuilder" + "github.com/vapor/config" "github.com/vapor/consensus" + engine "github.com/vapor/consensus/consensus" + "github.com/vapor/crypto/ed25519/chainkd" "github.com/vapor/errors" "github.com/vapor/protocol" "github.com/vapor/protocol/bc" @@ -27,12 +32,18 @@ func createCoinbaseTx(accountManager *account.Manager, amount uint64, blockHeigh arbitrary := append([]byte{0x00}, []byte(strconv.FormatUint(blockHeight, 10))...) var script []byte - if accountManager == nil { - script, err = vmutil.DefaultCoinbaseProgram() - } else { - script, err = accountManager.GetCoinbaseControlProgram() - arbitrary = append(arbitrary, accountManager.GetCoinbaseArbitrary()...) - } + address, _ := common.DecodeAddress(config.CommonConfig.Consensus.Dpos.Coinbase, &consensus.ActiveNetParams) + redeemContract := address.ScriptAddress() + script, _ = vmutil.P2WPKHProgram(redeemContract) + /* + if accountManager == nil { + script, err = vmutil.DefaultCoinbaseProgram() + } else { + + script, err = accountManager.GetCoinbaseControlProgram() + arbitrary = append(arbitrary, accountManager.GetCoinbaseArbitrary()...) 
+ } + */ if err != nil { return nil, err } @@ -82,15 +93,15 @@ func NewBlockTemplate(c *protocol.Chain, txPool *protocol.TxPool, accountManager preBlockHash := preBlockHeader.Hash() nextBlockHeight := preBlockHeader.Height + 1 - b = &types.Block{ - BlockHeader: types.BlockHeader{ - Version: 1, - Height: nextBlockHeight, - PreviousBlockHash: preBlockHash, - Timestamp: uint64(time.Now().Unix()), - BlockCommitment: types.BlockCommitment{}, - }, + header := types.BlockHeader{ + Version: 1, + Height: nextBlockHeight, + PreviousBlockHash: preBlockHash, + Timestamp: uint64(time.Now().Unix()), + BlockCommitment: types.BlockCommitment{}, } + + b = &types.Block{BlockHeader: header} bcBlock := &bc.Block{BlockHeader: &bc.BlockHeader{Height: nextBlockHeight}} b.Transactions = []*types.Tx{nil} @@ -139,6 +150,115 @@ func NewBlockTemplate(c *protocol.Chain, txPool *protocol.TxPool, accountManager if txFee == 0 { return nil, err } + + // creater coinbase transaction + b.Transactions[0], err = createCoinbaseTx(accountManager, txFee, nextBlockHeight) + if err != nil { + return nil, errors.Wrap(err, "fail on createCoinbaseTx") + } + txEntries[0] = b.Transactions[0].Tx + + b.BlockHeader.BlockCommitment.TransactionsMerkleRoot, err = types.TxMerkleRoot(txEntries) + if err != nil { + return nil, err + } + + b.BlockHeader.BlockCommitment.TransactionStatusHash, err = types.TxStatusMerkleRoot(txStatus.VerifyStatus) + return b, err +} + +// NewBlockTemplate returns a new block template that is ready to be solved +func NewBlockTemplate1(c *protocol.Chain, txPool *protocol.TxPool, accountManager *account.Manager, engine engine.Engine) (b *types.Block, err error) { + view := state.NewUtxoViewpoint() + txStatus := bc.NewTransactionStatus() + if err := txStatus.SetStatus(0, false); err != nil { + return nil, err + } + txEntries := []*bc.Tx{nil} + gasUsed := uint64(0) + txFee := uint64(0) + + // get preblock info for generate next block + preBlockHeader := c.BestBlockHeader() + preBlockHash := preBlockHeader.Hash() + nextBlockHeight := preBlockHeader.Height + 1 + + var xPrv chainkd.XPrv + if config.CommonConfig.Consensus.Dpos.XPrv == "" { + return nil, errors.New("Signer is empty") + } + xPrv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)) + xpub, _ := xPrv.XPub().MarshalText() + + header := types.BlockHeader{ + Version: 1, + Height: nextBlockHeight, + PreviousBlockHash: preBlockHash, + Timestamp: uint64(time.Now().Unix()), + BlockCommitment: types.BlockCommitment{}, + Coinbase: xpub, + //Extra: make([]byte, 32+65), + } + + if err := engine.Prepare(c, &header); err != nil { + log.Error("Failed to prepare header for mining", "err", err) + return nil, err + } + + b = &types.Block{} + bcBlock := &bc.Block{BlockHeader: &bc.BlockHeader{Height: nextBlockHeight}} + b.Transactions = []*types.Tx{nil} + + txs := txPool.GetTransactions() + sort.Sort(byTime(txs)) + for _, txDesc := range txs { + tx := txDesc.Tx.Tx + gasOnlyTx := false + + if err := c.GetTransactionsUtxo(view, []*bc.Tx{tx}); err != nil { + blkGenSkipTxForErr(txPool, &tx.ID, err) + continue + } + gasStatus, err := validation.ValidateTx(tx, bcBlock) + if err != nil { + if !gasStatus.GasValid { + blkGenSkipTxForErr(txPool, &tx.ID, err) + continue + } + gasOnlyTx = true + } + + if gasUsed+uint64(gasStatus.GasUsed) > consensus.MaxBlockGas { + break + } + + if err := view.ApplyTransaction(bcBlock, tx, gasOnlyTx); err != nil { + blkGenSkipTxForErr(txPool, &tx.ID, err) + continue + } + + if err := txStatus.SetStatus(len(b.Transactions), gasOnlyTx); err != nil { 
+ return nil, err + } + + b.Transactions = append(b.Transactions, txDesc.Tx) + txEntries = append(txEntries, tx) + gasUsed += uint64(gasStatus.GasUsed) + txFee += txDesc.Fee + if gasUsed == consensus.MaxBlockGas { + break + } + } + + if txFee == 0 { + return nil, nil + } + + if err := engine.Finalize(c, &header, txEntries[1:]); err != nil { + return nil, err + } + + b.BlockHeader = header // creater coinbase transaction b.Transactions[0], err = createCoinbaseTx(accountManager, txFee, nextBlockHeight) if err != nil { diff --git a/netsync/block_fetcher.go b/netsync/block_fetcher.go index b03d0098..f7f3fae2 100644 --- a/netsync/block_fetcher.go +++ b/netsync/block_fetcher.go @@ -4,6 +4,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/karalabe/cookiejar.v2/collections/prque" + "github.com/vapor/chain" "github.com/vapor/protocol/bc" ) @@ -16,7 +17,7 @@ const ( // blockFetcher is responsible for accumulating block announcements from various peers // and scheduling them for retrieval. type blockFetcher struct { - chain Chain + chain chain.Chain peers *peerSet newBlockCh chan *blockMsg @@ -25,7 +26,7 @@ type blockFetcher struct { } //NewBlockFetcher creates a block fetcher to retrieve blocks of the new mined. -func newBlockFetcher(chain Chain, peers *peerSet) *blockFetcher { +func newBlockFetcher(chain chain.Chain, peers *peerSet) *blockFetcher { f := &blockFetcher{ chain: chain, peers: peers, diff --git a/netsync/block_keeper.go b/netsync/block_keeper.go index 38d136b7..c633a8cc 100644 --- a/netsync/block_keeper.go +++ b/netsync/block_keeper.go @@ -6,6 +6,7 @@ import ( log "github.com/sirupsen/logrus" + "github.com/vapor/chain" "github.com/vapor/consensus" "github.com/vapor/errors" "github.com/vapor/mining/tensority" @@ -47,7 +48,7 @@ type headersMsg struct { } type blockKeeper struct { - chain Chain + chain chain.Chain peers *peerSet syncPeer *peer @@ -58,7 +59,7 @@ type blockKeeper struct { headerList *list.List } -func newBlockKeeper(chain Chain, peers *peerSet) *blockKeeper { +func newBlockKeeper(chain chain.Chain, peers *peerSet) *blockKeeper { bk := &blockKeeper{ chain: chain, peers: peers, diff --git a/netsync/handle.go b/netsync/handle.go index 40fd51d4..9ee466e7 100644 --- a/netsync/handle.go +++ b/netsync/handle.go @@ -13,6 +13,7 @@ import ( "github.com/tendermint/go-crypto" cmn "github.com/tendermint/tmlibs/common" + "github.com/vapor/chain" cfg "github.com/vapor/config" "github.com/vapor/consensus" "github.com/vapor/p2p" @@ -30,28 +31,13 @@ const ( maxFilterAddressCount = 1000 ) -// Chain is the interface for Bytom core -type Chain interface { - BestBlockHeader() *types.BlockHeader - BestBlockHeight() uint64 - CalcNextSeed(*bc.Hash) (*bc.Hash, error) - GetBlockByHash(*bc.Hash) (*types.Block, error) - GetBlockByHeight(uint64) (*types.Block, error) - GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error) - GetHeaderByHeight(uint64) (*types.BlockHeader, error) - GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error) - InMainChain(bc.Hash) bool - ProcessBlock(*types.Block) (bool, error) - ValidateTx(*types.Tx) (bool, error) -} - //SyncManager Sync Manager is responsible for the business layer information synchronization type SyncManager struct { sw *p2p.Switch genesisHash bc.Hash privKey crypto.PrivKeyEd25519 // local node's p2p key - chain Chain + chain chain.Chain txPool *core.TxPool blockFetcher *blockFetcher blockKeeper *blockKeeper @@ -65,7 +51,7 @@ type SyncManager struct { } //NewSyncManager create a sync manager -func NewSyncManager(config *cfg.Config, chain Chain, 
txPool *core.TxPool, newBlockCh chan *bc.Hash) (*SyncManager, error) { +func NewSyncManager(config *cfg.Config, chain chain.Chain, txPool *core.TxPool, newBlockCh chan *bc.Hash) (*SyncManager, error) { genesisHeader, err := chain.GetHeaderByHeight(0) if err != nil { return nil, err diff --git a/node/node.go b/node/node.go index 45c335e3..0c7cfa49 100644 --- a/node/node.go +++ b/node/node.go @@ -24,12 +24,15 @@ import ( "github.com/vapor/asset" "github.com/vapor/blockchain/pseudohsm" "github.com/vapor/blockchain/txfeed" + "github.com/vapor/common" cfg "github.com/vapor/config" "github.com/vapor/consensus" + engine "github.com/vapor/consensus/consensus" + "github.com/vapor/consensus/consensus/dpos" "github.com/vapor/crypto/ed25519/chainkd" "github.com/vapor/database/leveldb" "github.com/vapor/env" - "github.com/vapor/mining/cpuminer" + "github.com/vapor/mining/miner" "github.com/vapor/mining/miningpool" "github.com/vapor/net/websocket" "github.com/vapor/netsync" @@ -44,6 +47,8 @@ const ( maxNewBlockChSize = 1024 ) +var consensusEngine engine.Engine + type Node struct { cmn.BaseService @@ -59,9 +64,11 @@ type Node struct { api *api.API chain *protocol.Chain txfeed *txfeed.Tracker - cpuMiner *cpuminer.CPUMiner - miningPool *miningpool.MiningPool - miningEnable bool + //cpuMiner *cpuminer.CPUMiner + miner *miner.Miner + + miningPool *miningpool.MiningPool + miningEnable bool newBlockCh chan *bc.Hash } @@ -73,7 +80,9 @@ func NewNode(config *cfg.Config) *Node { } initLogFile(config) initActiveNetParams(config) + initConsensusConfig(config) initCommonConfig(config) + util.MainchainConfig = config.MainChain util.ValidatePegin = config.ValidatePegin // Get store @@ -111,10 +120,14 @@ func NewNode(config *cfg.Config) *Node { } if !config.Wallet.Disable { + address, err := common.DecodeAddress(config.Consensus.Dpos.Coinbase, &consensus.ActiveNetParams) + if err != nil { + cmn.Exit(cmn.Fmt("DecodeAddress: %v", err)) + } walletDB := dbm.NewDB("wallet", config.DBBackend, config.DBDir()) accounts = account.NewManager(walletDB, chain) assets = asset.NewRegistry(walletDB, chain) - wallet, err = w.NewWallet(walletDB, accounts, assets, hsm, chain) + wallet, err = w.NewWallet(walletDB, accounts, assets, hsm, chain, address) if err != nil { log.WithField("error", err).Error("init NewWallet") } @@ -158,7 +171,9 @@ func NewNode(config *cfg.Config) *Node { notificationMgr: notificationMgr, } - node.cpuMiner = cpuminer.NewCPUMiner(chain, accounts, txPool, newBlockCh) + //node.cpuMiner = cpuminer.NewCPUMiner(chain, accounts, txPool, newBlockCh) + consensusEngine = createConsensusEngine(config, store) + node.miner = miner.NewMiner(chain, accounts, txPool, newBlockCh, consensusEngine) node.miningPool = miningpool.NewMiningPool(chain, accounts, txPool, newBlockCh) node.BaseService = *cmn.NewBaseService(nil, "Node", node) @@ -261,7 +276,7 @@ func launchWebBrowser(port string) { } func (n *Node) initAndstartApiServer() { - n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.cpuMiner, n.miningPool, n.chain, n.config, n.accessTokens, n.newBlockCh, n.notificationMgr) + n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.miner, n.miningPool, n.chain, n.config, n.accessTokens, n.newBlockCh, n.notificationMgr) listenAddr := env.String("LISTEN", n.config.ApiAddress) env.Parse() @@ -274,7 +289,8 @@ func (n *Node) OnStart() error { n.miningEnable = false log.Error(err) } else { - n.cpuMiner.Start() + //n.cpuMiner.Start() + n.miner.Start() } } if !n.config.VaultMode { @@ -299,7 +315,7 @@ func (n *Node) OnStop() { 
n.notificationMgr.WaitForShutdown() n.BaseService.OnStop() if n.miningEnable { - n.cpuMiner.Stop() + n.miner.Stop() } if !n.config.VaultMode { n.syncManager.Stop() @@ -339,7 +355,7 @@ func bytomdRPCCheck() bool { json.Unmarshal(tmp, &blockHeader) hash := blockHeader.BlockHeader.Hash() if strings.Compare(consensus.ActiveNetParams.ParentGenesisBlockHash, hash.String()) != 0 { - log.Error("Invalid parent genesis block hash response via RPC. Contacting wrong parent daemon?") + log.Error("Invalid parent genesis block hash response via RPC. Contacting wrong parent daemon?", consensus.ActiveNetParams.ParentGenesisBlockHash, hash.String()) return false } break @@ -348,3 +364,39 @@ func bytomdRPCCheck() bool { return true } + +func initConsensusConfig(config *cfg.Config) { + if config.ConsensusConfigFile == "" { + // poa + } else { + // + file, err := os.Open(config.ConsensusConfigFile) + if err != nil { + cmn.Exit(cmn.Fmt("Failed to read consensus file: %v", err)) + } + defer file.Close() + + if err := json.NewDecoder(file).Decode(config); err != nil { + cmn.Exit(cmn.Fmt("invalid consensus file: %v", err)) + } + for _, v := range config.Consensus.Dpos.SelfVoteSigners { + address, err := common.DecodeAddress(v, &consensus.ActiveNetParams) + if err != nil { + cmn.Exit(cmn.Fmt("Address resolution failed: %v", err)) + } + config.Consensus.Dpos.Signers = append(config.Consensus.Dpos.Signers, address) + } + } +} + +func createConsensusEngine(config *cfg.Config, store protocol.Store) engine.Engine { + if config.Consensus.Dpos != nil { + return dpos.New(config.Consensus.Dpos, store) + } else { + return nil + } +} + +func GetConsensusEngine() engine.Engine { + return consensusEngine +} diff --git a/protocol/bc/bc.pb.go b/protocol/bc/bc.pb.go index 4f545e76..628ba87e 100644 --- a/protocol/bc/bc.pb.go +++ b/protocol/bc/bc.pb.go @@ -28,6 +28,7 @@ It has these top-level messages: Issuance Spend Claim + Dpos */ package bc @@ -270,6 +271,7 @@ func (m *ValueDestination) GetPosition() uint64 { type Proof struct { Sign []byte `protobuf:"bytes,1,opt,name=sign,proto3" json:"sign,omitempty"` ControlProgram []byte `protobuf:"bytes,2,opt,name=controlProgram,proto3" json:"controlProgram,omitempty"` + Address []byte `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` } func (m *Proof) Reset() { *m = Proof{} } @@ -291,6 +293,13 @@ func (m *Proof) GetControlProgram() []byte { return nil } +func (m *Proof) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + type BytomBlockHeader struct { Version uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` Height uint64 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` @@ -301,7 +310,6 @@ type BytomBlockHeader struct { Nonce uint64 `protobuf:"varint,7,opt,name=nonce" json:"nonce,omitempty"` Bits uint64 `protobuf:"varint,8,opt,name=bits" json:"bits,omitempty"` TransactionStatus *TransactionStatus `protobuf:"bytes,9,opt,name=transaction_status,json=transactionStatus" json:"transaction_status,omitempty"` - Extra []byte `protobuf:"bytes,10,opt,name=extra,proto3" json:"extra,omitempty"` } func (m *BytomBlockHeader) Reset() { *m = BytomBlockHeader{} } @@ -372,13 +380,6 @@ func (m *BytomBlockHeader) GetTransactionStatus() *TransactionStatus { return nil } -func (m *BytomBlockHeader) GetExtra() []byte { - if m != nil { - return m.Extra - } - return nil -} - type BlockHeader struct { Version uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` Height uint64 `protobuf:"varint,2,opt,name=height" 
json:"height,omitempty"` @@ -390,6 +391,8 @@ type BlockHeader struct { Bits uint64 `protobuf:"varint,8,opt,name=bits" json:"bits,omitempty"` TransactionStatus *TransactionStatus `protobuf:"bytes,9,opt,name=transaction_status,json=transactionStatus" json:"transaction_status,omitempty"` Proof *Proof `protobuf:"bytes,10,opt,name=Proof" json:"Proof,omitempty"` + Extra []byte `protobuf:"bytes,11,opt,name=extra,proto3" json:"extra,omitempty"` + Coinbase []byte `protobuf:"bytes,12,opt,name=coinbase,proto3" json:"coinbase,omitempty"` } func (m *BlockHeader) Reset() { *m = BlockHeader{} } @@ -467,6 +470,20 @@ func (m *BlockHeader) GetProof() *Proof { return nil } +func (m *BlockHeader) GetExtra() []byte { + if m != nil { + return m.Extra + } + return nil +} + +func (m *BlockHeader) GetCoinbase() []byte { + if m != nil { + return m.Coinbase + } + return nil +} + type TxHeader struct { Version uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` SerializedSize uint64 `protobuf:"varint,2,opt,name=serialized_size,json=serializedSize" json:"serialized_size,omitempty"` @@ -811,6 +828,86 @@ func (m *Claim) GetPeginwitness() [][]byte { return nil } +type Dpos struct { + SpentOutputId *Hash `protobuf:"bytes,1,opt,name=spent_output_id,json=spentOutputId" json:"spent_output_id,omitempty"` + WitnessDestination *ValueDestination `protobuf:"bytes,2,opt,name=witness_destination,json=witnessDestination" json:"witness_destination,omitempty"` + WitnessArguments [][]byte `protobuf:"bytes,3,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"` + Ordinal uint64 `protobuf:"varint,4,opt,name=ordinal" json:"ordinal,omitempty"` + Type uint32 `protobuf:"varint,5,opt,name=type" json:"type,omitempty"` + From string `protobuf:"bytes,6,opt,name=from" json:"from,omitempty"` + To string `protobuf:"bytes,7,opt,name=to" json:"to,omitempty"` + Stake uint64 `protobuf:"varint,8,opt,name=stake" json:"stake,omitempty"` + Data string `protobuf:"bytes,9,opt,name=data" json:"data,omitempty"` +} + +func (m *Dpos) Reset() { *m = Dpos{} } +func (m *Dpos) String() string { return proto.CompactTextString(m) } +func (*Dpos) ProtoMessage() {} +func (*Dpos) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *Dpos) GetSpentOutputId() *Hash { + if m != nil { + return m.SpentOutputId + } + return nil +} + +func (m *Dpos) GetWitnessDestination() *ValueDestination { + if m != nil { + return m.WitnessDestination + } + return nil +} + +func (m *Dpos) GetWitnessArguments() [][]byte { + if m != nil { + return m.WitnessArguments + } + return nil +} + +func (m *Dpos) GetOrdinal() uint64 { + if m != nil { + return m.Ordinal + } + return 0 +} + +func (m *Dpos) GetType() uint32 { + if m != nil { + return m.Type + } + return 0 +} + +func (m *Dpos) GetFrom() string { + if m != nil { + return m.From + } + return "" +} + +func (m *Dpos) GetTo() string { + if m != nil { + return m.To + } + return "" +} + +func (m *Dpos) GetStake() uint64 { + if m != nil { + return m.Stake + } + return 0 +} + +func (m *Dpos) GetData() string { + if m != nil { + return m.Data + } + return "" +} + func init() { proto.RegisterType((*Hash)(nil), "bc.Hash") proto.RegisterType((*Program)(nil), "bc.Program") @@ -832,74 +929,80 @@ func init() { proto.RegisterType((*Issuance)(nil), "bc.Issuance") proto.RegisterType((*Spend)(nil), "bc.Spend") proto.RegisterType((*Claim)(nil), "bc.Claim") + proto.RegisterType((*Dpos)(nil), "bc.Dpos") } func init() { proto.RegisterFile("bc.proto", fileDescriptor0) } var 
fileDescriptor0 = []byte{ - // 1011 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcd, 0x6e, 0x1b, 0x37, - 0x10, 0x86, 0xb4, 0xab, 0xbf, 0x91, 0x63, 0xd9, 0xb4, 0x93, 0x2e, 0x82, 0x14, 0x09, 0x16, 0x48, - 0xdc, 0xa2, 0x80, 0x61, 0xcb, 0x69, 0x7b, 0xe9, 0xa1, 0x8e, 0xdd, 0x34, 0x3a, 0x18, 0x31, 0x68, - 0xc3, 0xd7, 0x05, 0xb5, 0x4b, 0xc9, 0x44, 0x57, 0x4b, 0x95, 0xe4, 0xaa, 0xb6, 0x6f, 0x7d, 0x88, - 0xa2, 0xa7, 0x3e, 0x47, 0x1f, 0xa1, 0xa7, 0x3e, 0x49, 0x5f, 0xa2, 0xe0, 0x2c, 0x57, 0x5a, 0xc9, - 0xce, 0x1f, 0x8a, 0xa2, 0x28, 0x90, 0xdb, 0xce, 0x70, 0xf8, 0xcd, 0xcc, 0x37, 0xc3, 0x21, 0x17, - 0xda, 0xc3, 0x78, 0x77, 0xaa, 0xa4, 0x91, 0xa4, 0x3e, 0x8c, 0xc3, 0x97, 0xe0, 0xbf, 0x62, 0xfa, - 0x92, 0xac, 0x43, 0x7d, 0xb6, 0x17, 0xd4, 0x9e, 0xd4, 0x3e, 0x6b, 0xd2, 0xfa, 0x6c, 0x0f, 0xe5, - 0xfd, 0xa0, 0xee, 0xe4, 0x7d, 0x94, 0xfb, 0x81, 0xe7, 0xe4, 0x3e, 0xca, 0x07, 0x81, 0xef, 0xe4, - 0x83, 0xf0, 0x1b, 0x68, 0x9d, 0x2a, 0x39, 0x56, 0x6c, 0x42, 0x3e, 0x05, 0x98, 0x4d, 0xa2, 0x19, - 0x57, 0x5a, 0xc8, 0x0c, 0x21, 0x7d, 0xda, 0x99, 0x4d, 0x2e, 0x0a, 0x05, 0x21, 0xe0, 0xc7, 0x32, - 0xe1, 0x88, 0xbd, 0x46, 0xf1, 0x3b, 0x1c, 0x40, 0xeb, 0x50, 0x6b, 0x6e, 0x06, 0xc7, 0xff, 0x38, - 0x90, 0x13, 0xe8, 0x22, 0xd4, 0xe1, 0x44, 0xe6, 0x99, 0x21, 0xcf, 0xa0, 0xcd, 0xac, 0x18, 0x89, - 0x04, 0x41, 0xbb, 0xfd, 0xee, 0xee, 0x30, 0xde, 0x75, 0xde, 0x68, 0x0b, 0x17, 0x07, 0x09, 0x79, - 0x00, 0x4d, 0x86, 0x3b, 0xd0, 0x95, 0x4f, 0x9d, 0x14, 0x8e, 0xa1, 0x87, 0xb6, 0xc7, 0x7c, 0x24, - 0x32, 0x61, 0x6c, 0x02, 0x5f, 0xc1, 0x86, 0xd0, 0x3a, 0x67, 0x59, 0xcc, 0xa3, 0x69, 0x91, 0x73, - 0x15, 0xda, 0xd1, 0x40, 0x7b, 0xa5, 0x51, 0xc9, 0xcb, 0x23, 0xf0, 0x13, 0x66, 0x18, 0x3a, 0xe8, - 0xf6, 0xdb, 0xd6, 0xd6, 0x52, 0x4f, 0x51, 0x1b, 0xa6, 0xd0, 0xbd, 0x60, 0x69, 0xce, 0xcf, 0x64, - 0xae, 0x62, 0x4e, 0x1e, 0x82, 0xa7, 0xf8, 0xc8, 0xe1, 0x2e, 0x6c, 0xad, 0x92, 0x3c, 0x85, 0xc6, - 0xcc, 0x9a, 0x3a, 0xa4, 0xde, 0x3c, 0xa1, 0x22, 0x67, 0x5a, 0xac, 0x92, 0x87, 0xd0, 0x9e, 0x4a, - 0x8d, 0x31, 0x23, 0x5f, 0x3e, 0x9d, 0xcb, 0xe1, 0x8f, 0xb0, 0x81, 0xde, 0x8e, 0xb9, 0x36, 0x22, - 0x63, 0x98, 0xd7, 0xbf, 0xec, 0xf2, 0x08, 0x1a, 0xa7, 0x4a, 0xca, 0x91, 0x6d, 0x00, 0x2d, 0xc6, - 0x45, 0x67, 0xac, 0x51, 0xfc, 0x26, 0xcf, 0x60, 0x3d, 0x96, 0x99, 0x51, 0x32, 0x75, 0x6c, 0xb9, - 0xf6, 0x58, 0xd1, 0x86, 0xbf, 0x7a, 0xb0, 0xf1, 0xe2, 0xda, 0xc8, 0xc9, 0x8b, 0x54, 0xc6, 0x3f, - 0xbc, 0xe2, 0x2c, 0xe1, 0x8a, 0x04, 0xd0, 0x5a, 0xee, 0xb6, 0x52, 0xb4, 0x55, 0xbd, 0xe4, 0x62, - 0x7c, 0x39, 0xaf, 0x6a, 0x21, 0x91, 0xe7, 0xb0, 0x39, 0x55, 0x7c, 0x26, 0x64, 0xae, 0xa3, 0xa1, - 0x45, 0xb2, 0xed, 0xe1, 0xad, 0x24, 0xde, 0x2b, 0x4d, 0xd0, 0xd7, 0x20, 0x21, 0x8f, 0xa0, 0x63, - 0xc4, 0x84, 0x6b, 0xc3, 0x26, 0x53, 0xec, 0x38, 0x9f, 0x2e, 0x14, 0xe4, 0x4b, 0xd8, 0x34, 0x8a, - 0x65, 0x9a, 0xc5, 0x36, 0x5d, 0x1d, 0x29, 0x29, 0x4d, 0xd0, 0x58, 0xc1, 0xdc, 0xa8, 0x9a, 0x50, - 0x29, 0x0d, 0xf9, 0x16, 0x3e, 0xa9, 0xe8, 0x22, 0x6d, 0x98, 0xc9, 0x75, 0x74, 0xc9, 0xf4, 0x65, - 0xd0, 0x5c, 0xd9, 0x7c, 0xbf, 0x62, 0x78, 0x86, 0x76, 0x78, 0x74, 0xb7, 0xa1, 0x91, 0xc9, 0x2c, - 0xe6, 0x41, 0x0b, 0x43, 0x2a, 0x04, 0xcb, 0xf2, 0x50, 0x18, 0x1d, 0xb4, 0x51, 0x89, 0xdf, 0xe4, - 0x18, 0xc8, 0x6d, 0x5f, 0x41, 0x07, 0xdd, 0xdc, 0xb7, 0x6e, 0xce, 0x57, 0x1d, 0xd0, 0xcd, 0x5b, - 0x3e, 0xad, 0x3f, 0x7e, 0x65, 0x14, 0x0b, 0x00, 0x4b, 0x54, 0x08, 0xe1, 0x6f, 0x1e, 0x74, 0x3f, - 0x16, 0xe5, 0x3f, 0x2b, 0xca, 0x63, 0x77, 0xba, 0xb0, 0x28, 0xdd, 0x7e, 0xc7, 0x4d, 0x22, 0x39, - 0xa2, 0x85, 0x3e, 0xfc, 0xa5, 0x06, 0xed, 0xf3, 
0xab, 0x77, 0x16, 0x67, 0x07, 0x7a, 0x9a, 0x2b, - 0xc1, 0x52, 0x71, 0xc3, 0x93, 0x48, 0x8b, 0x1b, 0xee, 0xaa, 0xb4, 0xbe, 0x50, 0x9f, 0x89, 0x1b, - 0x6e, 0xa7, 0xbc, 0xa5, 0x39, 0x52, 0x2c, 0x1b, 0x73, 0x77, 0xd8, 0x91, 0x78, 0x6a, 0x15, 0x64, - 0x07, 0x40, 0x71, 0x9d, 0xa7, 0x76, 0xf0, 0xea, 0xc0, 0x7f, 0xe2, 0x2d, 0x91, 0xd6, 0x29, 0xd6, - 0x06, 0x89, 0x0e, 0xf7, 0x61, 0xfd, 0xfc, 0xea, 0x82, 0x2b, 0x31, 0xba, 0xa6, 0xa8, 0x24, 0x8f, - 0xa1, 0xeb, 0x08, 0x1f, 0x31, 0x91, 0x62, 0x80, 0x6d, 0x0a, 0x85, 0xea, 0x25, 0x13, 0x69, 0x38, - 0x82, 0xcd, 0x5b, 0x9c, 0xbc, 0x25, 0xa5, 0xaf, 0xe1, 0xde, 0x0c, 0xf1, 0x4b, 0x6e, 0xeb, 0x18, - 0x0d, 0x41, 0x6e, 0x97, 0x5c, 0xd3, 0xb5, 0xc2, 0xb0, 0x80, 0x0c, 0xff, 0xac, 0x81, 0x77, 0x92, - 0x5f, 0x91, 0xcf, 0xa1, 0xa5, 0x71, 0x2a, 0xeb, 0xa0, 0x86, 0x5b, 0x71, 0xfc, 0x55, 0xa6, 0x35, - 0x2d, 0xd7, 0xc9, 0x53, 0x68, 0x4d, 0x2b, 0x03, 0x6c, 0xe5, 0x4a, 0x28, 0xd7, 0xc8, 0xf7, 0xb0, - 0xfd, 0x93, 0x30, 0x19, 0xd7, 0x3a, 0x4a, 0x16, 0x13, 0x58, 0x07, 0x1e, 0xc2, 0x6f, 0xcf, 0xe1, - 0x2b, 0xe3, 0x99, 0x6e, 0xb9, 0x1d, 0x15, 0x9d, 0x26, 0x5f, 0xc0, 0x66, 0x09, 0xc4, 0xd4, 0x38, - 0x9f, 0xf0, 0xcc, 0x14, 0x6c, 0xaf, 0xd1, 0x0d, 0xb7, 0x70, 0x58, 0xea, 0x43, 0x09, 0xed, 0x23, - 0x29, 0xb2, 0x21, 0xd3, 0x9c, 0x7c, 0x07, 0x5b, 0x77, 0x44, 0xe0, 0x86, 0xff, 0xdd, 0x01, 0x90, - 0xdb, 0x01, 0xd8, 0xd3, 0xc7, 0xd4, 0x50, 0x18, 0xc5, 0xd4, 0xb5, 0x1b, 0xd9, 0x0b, 0x45, 0xf8, - 0x73, 0x0d, 0x9a, 0xaf, 0x73, 0x33, 0xcd, 0x0d, 0xd9, 0x81, 0x66, 0xc1, 0x91, 0x73, 0x71, 0x8b, - 0x42, 0xb7, 0x4c, 0x9e, 0x43, 0xcf, 0xcd, 0xfc, 0xe8, 0x2d, 0x4c, 0xae, 0xdc, 0x0b, 0xb6, 0xfa, - 0x52, 0x25, 0x22, 0x63, 0xa9, 0x6b, 0xc5, 0x52, 0x0c, 0x5f, 0x03, 0x50, 0x6e, 0x84, 0xe2, 0x96, - 0x83, 0xf7, 0x0f, 0xa3, 0x02, 0x58, 0x5f, 0x06, 0xfc, 0xbd, 0x0e, 0xed, 0x81, 0xbb, 0xda, 0x6d, - 0x9b, 0xe3, 0xc9, 0x2e, 0x66, 0xc3, 0xea, 0xd5, 0xd9, 0xc1, 0x35, 0x9c, 0x07, 0xef, 0x79, 0x81, - 0xbe, 0xa1, 0x2c, 0xde, 0x07, 0x96, 0xe5, 0x04, 0x82, 0x79, 0x5b, 0xe0, 0xeb, 0x27, 0x99, 0x3f, - 0x5f, 0x70, 0x46, 0x76, 0xfb, 0x5b, 0xf3, 0x00, 0x16, 0x2f, 0x1b, 0xfa, 0xa0, 0x6c, 0x99, 0x95, - 0x17, 0xcf, 0x9d, 0x5d, 0xd6, 0xb8, 0xbb, 0xcb, 0xaa, 0xcc, 0x35, 0x97, 0x99, 0xfb, 0xa3, 0x06, - 0x8d, 0xb3, 0x29, 0xcf, 0x12, 0xb2, 0x07, 0x3d, 0x3d, 0xe5, 0x99, 0x89, 0x24, 0x76, 0xc7, 0xe2, - 0x71, 0xb6, 0xe0, 0xee, 0x1e, 0x1a, 0x14, 0xdd, 0x33, 0x48, 0xde, 0x44, 0x4c, 0xfd, 0x03, 0x89, - 0xb9, 0x33, 0x13, 0xef, 0xdd, 0x99, 0xf8, 0xcb, 0x99, 0xfc, 0x55, 0x83, 0xc6, 0x51, 0xca, 0xc4, - 0xe4, 0xff, 0x9e, 0x09, 0x09, 0x61, 0xed, 0x94, 0x8f, 0x45, 0xe6, 0xb6, 0xb8, 0xaa, 0x2e, 0xe9, - 0x86, 0x4d, 0xfc, 0x5d, 0x38, 0xf8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x25, 0x3b, 0x75, 0x3a, 0x3a, - 0x0c, 0x00, 0x00, + // 1091 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x86, 0x24, 0xea, 0x6f, 0x24, 0x5b, 0xf6, 0xda, 0x49, 0x89, 0x20, 0x45, 0x02, 0x02, 0x89, + 0x5b, 0x14, 0x30, 0xfc, 0x93, 0xb6, 0x97, 0x1e, 0xea, 0xc4, 0x4d, 0xa3, 0x83, 0x11, 0x63, 0x6d, + 0xf8, 0x56, 0x10, 0x2b, 0x72, 0x25, 0x2f, 0x22, 0x71, 0xd9, 0xdd, 0xa5, 0x6a, 0xfb, 0x96, 0x87, + 0xe8, 0x3b, 0xf4, 0x0d, 0xfa, 0x08, 0x3d, 0x15, 0x7d, 0x90, 0xbe, 0x44, 0xb1, 0xc3, 0xa5, 0x44, + 0xc9, 0xca, 0x1f, 0x8a, 0xa2, 0x08, 0x90, 0x1b, 0xe7, 0x67, 0xe7, 0xe7, 0x9b, 0xd9, 0x99, 0x25, + 0xb4, 0x06, 0xd1, 0x6e, 0xaa, 0xa4, 0x91, 0xa4, 0x3a, 0x88, 0x82, 0xe7, 0xe0, 0xbd, 0x60, 0xfa, + 0x92, 0xac, 0x43, 0x75, 0xba, 0xe7, 0x57, 0x1e, 0x56, 0xbe, 0x68, 0xd0, 0xea, 0x74, 0x0f, 0xe9, + 0x7d, 
0xbf, 0xea, 0xe8, 0x7d, 0xa4, 0x0f, 0xfc, 0x9a, 0xa3, 0x0f, 0x90, 0x3e, 0xf4, 0x3d, 0x47, + 0x1f, 0x06, 0xdf, 0x41, 0xf3, 0x54, 0xc9, 0x91, 0x62, 0x13, 0xf2, 0x39, 0xc0, 0x74, 0x12, 0x4e, + 0xb9, 0xd2, 0x42, 0x26, 0x68, 0xd2, 0xa3, 0xed, 0xe9, 0xe4, 0x22, 0x67, 0x10, 0x02, 0x5e, 0x24, + 0x63, 0x8e, 0xb6, 0xbb, 0x14, 0xbf, 0x83, 0x3e, 0x34, 0x8f, 0xb4, 0xe6, 0xa6, 0x7f, 0xfc, 0xaf, + 0x03, 0x39, 0x81, 0x0e, 0x9a, 0x3a, 0x9a, 0xc8, 0x2c, 0x31, 0xe4, 0x31, 0xb4, 0x98, 0x25, 0x43, + 0x11, 0xa3, 0xd1, 0xce, 0x41, 0x67, 0x77, 0x10, 0xed, 0x3a, 0x6f, 0xb4, 0x89, 0xc2, 0x7e, 0x4c, + 0xee, 0x42, 0x83, 0xe1, 0x09, 0x74, 0xe5, 0x51, 0x47, 0x05, 0x23, 0xe8, 0xa1, 0xee, 0x31, 0x1f, + 0x8a, 0x44, 0x18, 0x9b, 0xc0, 0x37, 0xb0, 0x21, 0xb4, 0xce, 0x58, 0x12, 0xf1, 0x30, 0xcd, 0x73, + 0x2e, 0x9b, 0x76, 0x30, 0xd0, 0x5e, 0xa1, 0x54, 0xe0, 0x72, 0x1f, 0xbc, 0x98, 0x19, 0x86, 0x0e, + 0x3a, 0x07, 0x2d, 0xab, 0x6b, 0xa1, 0xa7, 0xc8, 0x0d, 0xc6, 0xd0, 0xb9, 0x60, 0xe3, 0x8c, 0x9f, + 0xc9, 0x4c, 0x45, 0x9c, 0xdc, 0x83, 0x9a, 0xe2, 0x43, 0x67, 0x77, 0xae, 0x6b, 0x99, 0xe4, 0x11, + 0xd4, 0xa7, 0x56, 0xd5, 0x59, 0xea, 0xcd, 0x12, 0xca, 0x73, 0xa6, 0xb9, 0x94, 0xdc, 0x83, 0x56, + 0x2a, 0x35, 0xc6, 0x8c, 0x78, 0x79, 0x74, 0x46, 0x07, 0x3f, 0xc3, 0x06, 0x7a, 0x3b, 0xe6, 0xda, + 0x88, 0x84, 0x61, 0x5e, 0xff, 0xb1, 0xcb, 0x9f, 0xa0, 0x7e, 0xaa, 0xa4, 0x1c, 0xda, 0x06, 0xd0, + 0x62, 0x94, 0x77, 0x46, 0x97, 0xe2, 0x37, 0x79, 0x0c, 0xeb, 0x91, 0x4c, 0x8c, 0x92, 0x63, 0x87, + 0x96, 0x6b, 0x8f, 0x25, 0x2e, 0xf1, 0xa1, 0xc9, 0xe2, 0x58, 0x71, 0xad, 0xd1, 0x7e, 0x97, 0x16, + 0x64, 0xf0, 0xba, 0x06, 0x1b, 0x4f, 0xaf, 0x8d, 0x9c, 0x3c, 0x1d, 0xcb, 0xe8, 0xd5, 0x0b, 0xce, + 0x62, 0xae, 0xac, 0xfa, 0x62, 0x1f, 0x16, 0xa4, 0xad, 0xf7, 0x25, 0x17, 0xa3, 0xcb, 0x59, 0xbd, + 0x73, 0x8a, 0x3c, 0x81, 0xcd, 0x54, 0xf1, 0xa9, 0x90, 0x99, 0x0e, 0x07, 0xd6, 0x92, 0x6d, 0x9c, + 0xda, 0x12, 0x24, 0xbd, 0x42, 0x05, 0x7d, 0xf5, 0x63, 0x72, 0x1f, 0xda, 0x46, 0x4c, 0xb8, 0x36, + 0x6c, 0x92, 0x62, 0x2f, 0x7a, 0x74, 0xce, 0x20, 0x5f, 0xc3, 0xa6, 0x51, 0x2c, 0xd1, 0x2c, 0xb2, + 0x40, 0xe8, 0x50, 0x49, 0x69, 0xfc, 0xfa, 0x92, 0xcd, 0x8d, 0xb2, 0x0a, 0x95, 0xd2, 0x90, 0xef, + 0xe1, 0xb3, 0x12, 0x2f, 0xd4, 0x86, 0x99, 0x4c, 0x87, 0x97, 0x4c, 0x5f, 0xfa, 0x8d, 0xa5, 0xc3, + 0x77, 0x4a, 0x8a, 0x67, 0xa8, 0x87, 0x97, 0x7a, 0x1b, 0xea, 0x89, 0x4c, 0x22, 0xee, 0x37, 0x31, + 0xa4, 0x9c, 0xb0, 0xf8, 0x0f, 0x84, 0xd1, 0x7e, 0x0b, 0x99, 0xf8, 0x4d, 0x8e, 0x81, 0xdc, 0xf6, + 0xe5, 0xb7, 0xd1, 0xcd, 0x1d, 0xeb, 0xe6, 0x7c, 0xd9, 0x01, 0xdd, 0xbc, 0xe5, 0x33, 0xf8, 0xab, + 0x06, 0x9d, 0x4f, 0xf0, 0xff, 0x5f, 0xf0, 0x93, 0x07, 0xee, 0x86, 0xf9, 0x80, 0x07, 0xdb, 0x6e, + 0x1a, 0xc9, 0x21, 0x75, 0x37, 0x6f, 0x1b, 0xea, 0xfc, 0xca, 0x28, 0xe6, 0x77, 0xf0, 0xee, 0xe4, + 0x84, 0xbd, 0xb4, 0x91, 0x14, 0xc9, 0x80, 0x69, 0xee, 0x77, 0x51, 0x30, 0xa3, 0x83, 0x5f, 0x2b, + 0xd0, 0x3a, 0xbf, 0x7a, 0x67, 0x39, 0x77, 0xa0, 0xa7, 0xb9, 0x12, 0x6c, 0x2c, 0x6e, 0x78, 0x1c, + 0x6a, 0x71, 0xc3, 0x5d, 0x5d, 0xd7, 0xe7, 0xec, 0x33, 0x71, 0xc3, 0xed, 0x6e, 0xb0, 0x85, 0x09, + 0x15, 0x4b, 0x46, 0xdc, 0x8d, 0x08, 0x2c, 0x15, 0xb5, 0x0c, 0xb2, 0x03, 0xa0, 0xb8, 0xce, 0xc6, + 0x76, 0x5c, 0x6b, 0xdf, 0x7b, 0x58, 0x5b, 0x80, 0xb9, 0x9d, 0xcb, 0xfa, 0xb1, 0x0e, 0xf6, 0x61, + 0xfd, 0xfc, 0xea, 0x82, 0x2b, 0x31, 0xbc, 0xa6, 0xc8, 0x24, 0x0f, 0xa0, 0xe3, 0x4a, 0x34, 0x64, + 0x62, 0x8c, 0x01, 0xb6, 0x28, 0xe4, 0xac, 0xe7, 0x4c, 0x8c, 0x83, 0x21, 0x6c, 0xde, 0x42, 0xf1, + 0x2d, 0x29, 0x7d, 0x0b, 0x6b, 0x53, 0xb4, 0x5f, 0x54, 0xa3, 0x8a, 0xd1, 0x10, 0xac, 0xc6, 0x82, + 0x6b, 0xda, 0xcd, 0x15, 0xdd, 
0x25, 0xf8, 0xb3, 0x02, 0xb5, 0x93, 0xec, 0x8a, 0x7c, 0x09, 0x4d, + 0x8d, 0xb3, 0x5c, 0xfb, 0x15, 0x3c, 0x8a, 0x43, 0xb3, 0x34, 0xe3, 0x69, 0x21, 0x27, 0x8f, 0xa0, + 0x99, 0x96, 0xc6, 0xde, 0xd2, 0x22, 0x29, 0x64, 0xe4, 0x47, 0xd8, 0xfe, 0x45, 0x98, 0x84, 0x6b, + 0x1d, 0xc6, 0xf3, 0xb9, 0x6d, 0x27, 0xa1, 0x35, 0xbf, 0x3d, 0x33, 0x5f, 0x1a, 0xea, 0x74, 0xcb, + 0x9d, 0x28, 0xf1, 0x34, 0xf9, 0x0a, 0x36, 0x0b, 0x43, 0x4c, 0x8d, 0xb2, 0x09, 0x4f, 0x4c, 0x8e, + 0x76, 0x97, 0x6e, 0x38, 0xc1, 0x51, 0xc1, 0x0f, 0x24, 0xb4, 0x9e, 0xb9, 0x76, 0x20, 0x3f, 0xc0, + 0xd6, 0x8a, 0x08, 0xdc, 0xca, 0x58, 0x1d, 0x00, 0xb9, 0x1d, 0x80, 0xbd, 0xaf, 0x4c, 0x0d, 0x84, + 0x51, 0x4c, 0x5d, 0xbb, 0x41, 0x3f, 0x67, 0x04, 0xaf, 0x2b, 0xd0, 0x78, 0x99, 0x99, 0x34, 0x33, + 0x64, 0x07, 0x1a, 0x39, 0x46, 0xce, 0xc5, 0x2d, 0x08, 0x9d, 0x98, 0x3c, 0x81, 0x9e, 0xdb, 0x14, + 0xe1, 0x5b, 0x90, 0x5c, 0xb1, 0x4d, 0xa4, 0x8a, 0x45, 0xc2, 0xc6, 0xae, 0x15, 0x0b, 0x32, 0x78, + 0x09, 0x40, 0xb9, 0x11, 0x8a, 0x5b, 0x0c, 0xde, 0x3f, 0x8c, 0x92, 0xc1, 0xea, 0xa2, 0xc1, 0xdf, + 0xab, 0xd0, 0xea, 0xbb, 0x07, 0x81, 0x6d, 0x73, 0x9c, 0x05, 0xf9, 0x34, 0x59, 0x5e, 0xb8, 0x6d, + 0x94, 0xe1, 0x04, 0x79, 0xcf, 0xb5, 0xfb, 0x86, 0xb2, 0xd4, 0x3e, 0xb0, 0x2c, 0x27, 0xe0, 0xcf, + 0xda, 0x02, 0xdf, 0x4c, 0xf1, 0xec, 0xd1, 0x83, 0x53, 0xb5, 0x73, 0xb0, 0x35, 0x0b, 0x60, 0xfe, + 0x1e, 0xa2, 0x77, 0x8b, 0x96, 0x59, 0x7a, 0x27, 0xad, 0xec, 0xb2, 0xfa, 0xea, 0x2e, 0x2b, 0x23, + 0xd7, 0x58, 0x44, 0xee, 0x8f, 0x0a, 0xd4, 0xcf, 0x52, 0x9e, 0xc4, 0x64, 0x0f, 0x7a, 0x3a, 0xe5, + 0x89, 0x09, 0x25, 0x76, 0xc7, 0xfc, 0x49, 0x37, 0xc7, 0x6e, 0x0d, 0x15, 0xf2, 0xee, 0xe9, 0xc7, + 0x6f, 0x02, 0xa6, 0xfa, 0x81, 0xc0, 0xac, 0xcc, 0xa4, 0xf6, 0xee, 0x4c, 0xbc, 0xc5, 0x4c, 0xfe, + 0xae, 0x40, 0xfd, 0xd9, 0x98, 0x89, 0xc9, 0xc7, 0x9e, 0x09, 0x09, 0xa0, 0x7b, 0xca, 0x47, 0x22, + 0x71, 0x47, 0x5c, 0x55, 0x17, 0x78, 0xc1, 0x6f, 0x55, 0xf0, 0x8e, 0x53, 0xa9, 0x3f, 0xfa, 0x64, + 0x09, 0x78, 0xe6, 0x3a, 0xe5, 0xf8, 0x64, 0x58, 0xa3, 0xf8, 0x6d, 0x79, 0x43, 0x25, 0x27, 0xd8, + 0xab, 0x6d, 0x8a, 0xdf, 0xf6, 0x4f, 0xc4, 0x48, 0xdc, 0xf5, 0x6d, 0x5a, 0x35, 0xd2, 0x6e, 0x5b, + 0x6d, 0xd8, 0x2b, 0xee, 0x36, 0x7d, 0x4e, 0xd8, 0x93, 0xf8, 0x17, 0xd0, 0xce, 0x4f, 0xda, 0xef, + 0x41, 0x03, 0xff, 0xc7, 0x0e, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x3c, 0x20, 0xe3, 0x9b, + 0x0d, 0x00, 0x00, } diff --git a/protocol/bc/bc.proto b/protocol/bc/bc.proto index 596f5bdd..59d70c22 100644 --- a/protocol/bc/bc.proto +++ b/protocol/bc/bc.proto @@ -51,6 +51,7 @@ message ValueDestination { message Proof { bytes sign = 1; bytes controlProgram = 2; + bytes address = 3; } message BytomBlockHeader { @@ -63,7 +64,6 @@ message BytomBlockHeader { uint64 nonce = 7; uint64 bits = 8; TransactionStatus transaction_status = 9; - bytes extra = 10; } message BlockHeader { @@ -77,6 +77,8 @@ message BlockHeader { uint64 bits = 8; TransactionStatus transaction_status = 9; Proof Proof = 10; + bytes extra = 11; + bytes coinbase = 12; } message TxHeader { @@ -140,4 +142,16 @@ message Claim { repeated bytes witness_arguments = 3; uint64 ordinal = 4; repeated bytes Peginwitness = 5; -} \ No newline at end of file +} + +message Dpos { + Hash spent_output_id = 1; + ValueDestination witness_destination = 2; + repeated bytes witness_arguments = 3; + uint64 ordinal = 4; + uint32 type = 5; + string from = 6; + string to = 7; + uint64 stake = 8; + string data = 9; +} diff --git a/protocol/bc/blockheader.go b/protocol/bc/blockheader.go index 64476b73..45177554 100644 --- a/protocol/bc/blockheader.go +++ 
b/protocol/bc/blockheader.go
@@ -11,15 +11,17 @@ func (bh *BlockHeader) writeForHash(w io.Writer) {
 	mustWriteForHash(w, bh.Height)
 	mustWriteForHash(w, bh.PreviousBlockId)
 	mustWriteForHash(w, bh.Timestamp)
+	//mustWriteForHash(w, bh.Coinbase)
 	mustWriteForHash(w, bh.TransactionsRoot)
 	mustWriteForHash(w, bh.TransactionStatusHash)
 	mustWriteForHash(w, bh.Proof.Sign)
 	mustWriteForHash(w, bh.Proof.ControlProgram)
+	mustWriteForHash(w, bh.Extra)
 }
 
 // NewBlockHeader creates a new BlockHeader and populates
 // its body.
-func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestamp uint64, transactionsRoot, transactionStatusHash *Hash, proof *Proof) *BlockHeader {
+func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestamp uint64, transactionsRoot, transactionStatusHash *Hash, proof *Proof, extra []byte, coinbase []byte) *BlockHeader {
 	return &BlockHeader{
 		Version:               version,
 		Height:                height,
@@ -29,5 +31,7 @@ func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestamp uin
 		TransactionStatusHash: transactionStatusHash,
 		TransactionStatus:     nil,
 		Proof:                 proof,
+		Extra:                 extra,
+		Coinbase:              coinbase,
 	}
 }
diff --git a/protocol/bc/dpos.go b/protocol/bc/dpos.go
new file mode 100644
index 00000000..39106378
--- /dev/null
+++ b/protocol/bc/dpos.go
@@ -0,0 +1,36 @@
+package bc
+
+import (
+	"io"
+)
+
+func (Dpos) typ() string { return "dpos1" }
+func (d *Dpos) writeForHash(w io.Writer) {
+	mustWriteForHash(w, d.SpentOutputId)
+	mustWriteForHash(w, d.Type)
+	mustWriteForHash(w, d.From)
+	mustWriteForHash(w, d.To)
+	mustWriteForHash(w, d.Stake)
+}
+
+// SetDestination links the dpos input to the output it spends.
+func (d *Dpos) SetDestination(id *Hash, val *AssetAmount, pos uint64) {
+	d.WitnessDestination = &ValueDestination{
+		Ref:      id,
+		Value:    val,
+		Position: pos,
+	}
+}
+
+// NewDpos creates a new Dpos entry.
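Aside, not part of the patch: because writeForHash above only covers SpentOutputId, Type, From, To and Stake, those are the only Dpos fields that influence the entry ID; Ordinal, Data and the witness fields do not. A hedged usage sketch of NewDpos (whose definition continues immediately below) together with bc.EntryID:

package example

import (
	"github.com/vapor/protocol/bc"
	"github.com/vapor/protocol/bc/types"
)

// dposEntryID is an illustrative helper, not part of the patch.
func dposEntryID(spentOutputID *bc.Hash) bc.Hash {
	// Only SpentOutputId, Type, From, To and Stake are written by writeForHash,
	// so two entries differing only in Ordinal, Data or witness data share an ID.
	d := bc.NewDpos(spentOutputID, 0, uint32(types.Delegate), 100000000,
		"voter-address", "delegate-address", "vapor:1:event:vote")
	return bc.EntryID(d)
}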
+func NewDpos(spentOutputID *Hash, ordinal uint64, t uint32, stake uint64, from, to, data string) *Dpos { + return &Dpos{ + SpentOutputId: spentOutputID, + Ordinal: ordinal, + Type: t, + From: from, + To: to, + Stake: stake, + Data: data, + } +} diff --git a/protocol/bc/entry.go b/protocol/bc/entry.go index 71f5c8bf..e315026b 100644 --- a/protocol/bc/entry.go +++ b/protocol/bc/entry.go @@ -88,6 +88,11 @@ func writeForHash(w io.Writer, c interface{}) error { binary.LittleEndian.PutUint64(buf[:], v) _, err := w.Write(buf[:]) return errors.Wrapf(err, "writing uint64 (%d) for hash", v) + case uint32: + buf := [8]byte{} + binary.LittleEndian.PutUint32(buf[:], v) + _, err := w.Write(buf[:]) + return errors.Wrapf(err, "writing uint64 (%d) for hash", v) case []byte: _, err := blockchain.WriteVarstr31(w, v) return errors.Wrapf(err, "writing []byte (len %d) for hash", len(v)) diff --git a/protocol/bc/types/block_header.go b/protocol/bc/types/block_header.go index c94724b0..ab47019a 100644 --- a/protocol/bc/types/block_header.go +++ b/protocol/bc/types/block_header.go @@ -15,6 +15,7 @@ import ( type Proof struct { Sign []byte ControlProgram []byte + Address []byte } func (p *Proof) readFrom(r *blockchain.Reader) (err error) { @@ -24,6 +25,9 @@ func (p *Proof) readFrom(r *blockchain.Reader) (err error) { if p.ControlProgram, err = blockchain.ReadVarstr31(r); err != nil { return err } + if p.Address, err = blockchain.ReadVarstr31(r); err != nil { + return err + } return nil } @@ -35,6 +39,9 @@ func (p *Proof) writeTo(w io.Writer) error { if _, err := blockchain.WriteVarstr31(w, p.ControlProgram); err != nil { return err } + if _, err := blockchain.WriteVarstr31(w, p.Address); err != nil { + return err + } return nil } @@ -44,7 +51,9 @@ type BlockHeader struct { Height uint64 // The height of the block. PreviousBlockHash bc.Hash // The hash of the previous block. Timestamp uint64 // The time of the block in seconds. 
+ Coinbase []byte Proof Proof + Extra []byte BlockCommitment } @@ -108,12 +117,18 @@ func (bh *BlockHeader) readFrom(r *blockchain.Reader) (serflag uint8, err error) if bh.Timestamp, err = blockchain.ReadVarint63(r); err != nil { return 0, err } + if bh.Coinbase, err = blockchain.ReadVarstr31(r); err != nil { + return 0, err + } if _, err = blockchain.ReadExtensibleString(r, bh.BlockCommitment.readFrom); err != nil { return 0, err } if _, err = blockchain.ReadExtensibleString(r, bh.Proof.readFrom); err != nil { return 0, err } + if bh.Extra, err = blockchain.ReadVarstr31(r); err != nil { + return 0, err + } return } @@ -140,11 +155,17 @@ func (bh *BlockHeader) writeTo(w io.Writer, serflags uint8) (err error) { if _, err = blockchain.WriteVarint63(w, bh.Timestamp); err != nil { return err } + if _, err := blockchain.WriteVarstr31(w, bh.Coinbase); err != nil { + return err + } if _, err = blockchain.WriteExtensibleString(w, nil, bh.BlockCommitment.writeTo); err != nil { return err } if _, err = blockchain.WriteExtensibleString(w, nil, bh.Proof.writeTo); err != nil { return err } + if _, err = blockchain.WriteVarstr31(w, bh.Extra); err != nil { + return err + } return nil } diff --git a/protocol/bc/types/bytom/types/block_test.go b/protocol/bc/types/bytom/types/block_test.go new file mode 100644 index 00000000..8c432b81 --- /dev/null +++ b/protocol/bc/types/bytom/types/block_test.go @@ -0,0 +1,26 @@ +package types + +import ( + "testing" + + "github.com/vapor/protocol/bc" +) + +func TestRawBlock(t *testing.T) { + strRawBlock := "0301629f91cdd7f923fbb5283390f3c52b3c2559fcf15de352bae10b7ea0fc50f3e650dfe89de0054066408c9a91229b63bbb2b7510887a0519dcc6446c5b045bb8444afa939368e4f6978a65b4ee5b6f4914fe5c05000459a803ecf59132604e5d334d64249c5e50a0ecc99b38080808080200207010001010502030039380001013effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa090fbd59901011600143143baf513dead7f3478479401eb1ed38eb1e19f00070100010160015e89a6c0f09e3e512f01dc1e4cf42d28eb67b76def47c59257448af26945f542d8ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc09af7934f0001160014cc443b29275271cd5e793430fdb7e57e3eae247663024024245117a930a6d398c854fb31bb5e3921d82e357d58778d72e0b4b1f0a3583d0d02a399b06ec5209c86b01f590c36fb29b3f2a3dbbe4b5f6bd9d852085967022013604b73c1df1aff3c2503eff8aa93498dcc7800051f74814123980e09e76a0702013dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0db90f3290116001497e164c8f82edc1412f7d87e37f84fb44b55c012000149ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80c8afa02501220020df81990a397ca4df7afb786e5fcd33951259e9b7341934c15c7362997a09c51e00" + block := &Block{} + err := block.UnmarshalText([]byte(strRawBlock)) + if err != nil { + t.Errorf("UnmarshalText err : %s", err) + } + strTxID := "f7a046ac947efa7da14696c2aa667f4657802609a6aafc056ee4c8d7f4940a83" + txID := bc.Hash{} + txID.UnmarshalText([]byte(strTxID)) + for _, tx := range block.Transactions { + tmp := tx.ID.String() + if tmp == txID.String() { + break + } + } + block.Hash() +} diff --git a/protocol/bc/types/claim_pegin.go b/protocol/bc/types/claim_pegin.go index aede9766..b0132051 100644 --- a/protocol/bc/types/claim_pegin.go +++ b/protocol/bc/types/claim_pegin.go @@ -4,11 +4,6 @@ import ( "github.com/vapor/protocol/bc" ) -type ClaimCommitment struct { - bc.AssetAmount - VMVersion uint64 - ControlProgram []byte -} // ClaimInput satisfies the TypedInput interface and represents a spend transaction. 
type ClaimInput struct { @@ -17,8 +12,8 @@ type ClaimInput struct { SpendCommitment } -// NewClaimInputInput create a new SpendInput struct. -func NewClaimInputInput(arguments [][]byte, sourceID bc.Hash, assetID bc.AssetID, amount, sourcePos uint64, controlProgram []byte) *TxInput { +// NewClaimInput create a new SpendInput struct. +func NewClaimInput(arguments [][]byte, sourceID bc.Hash, assetID bc.AssetID, amount, sourcePos uint64, controlProgram []byte) *TxInput { sc := SpendCommitment{ AssetAmount: bc.AssetAmount{ diff --git a/protocol/bc/types/dpos.go b/protocol/bc/types/dpos.go new file mode 100644 index 00000000..9646db93 --- /dev/null +++ b/protocol/bc/types/dpos.go @@ -0,0 +1,65 @@ +package types + +import ( + "github.com/vapor/protocol/bc" +) + +/* +const ( + LoginCandidate DposType = iota + LogoutCandidate + Delegate + UnDelegate +) +*/ + +type DposTx struct { + SpendCommitmentSuffix []byte + Type TxType + From string + To string + Amount uint64 + Stake uint64 + PaymentAmount uint64 + Arguments [][]byte + Info string + SpendCommitment +} + +func NewDpos(arguments [][]byte, from, to string, sourceID bc.Hash, assetID bc.AssetID, stake, amount, sourcePos uint64, controlProgram []byte, t TxType) *TxInput { + var vote string + switch t { + case LoginCandidate: + case LogoutCandidate: + case Delegate: + vote = "vapor:1:event:vote" + case UnDelegate: + } + sc := SpendCommitment{ + AssetAmount: bc.AssetAmount{ + AssetId: &assetID, + Amount: amount, + }, + SourceID: sourceID, + SourcePosition: sourcePos, + VMVersion: 1, + ControlProgram: controlProgram, + } + + return &TxInput{ + AssetVersion: 1, + TypedInput: &DposTx{ + SpendCommitment: sc, + Type: t, + Amount: amount, + Arguments: arguments, + Info: vote, + Stake: stake, + From: from, + To: to, + }, + } +} + +// InputType is the interface function for return the input type. 
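The NewDpos constructor above takes ten positional arguments, so a usage sketch may help. The values below are placeholders and buildDelegateInput is an assumed helper, not part of this patch; the witness arguments are left nil because they are typically attached later when the transaction is signed.

package example

import (
	"github.com/vapor/protocol/bc"
	"github.com/vapor/protocol/bc/types"
)

// buildDelegateInput wraps an existing utxo into a delegate-vote dpos input.
func buildDelegateInput(sourceID bc.Hash, assetID bc.AssetID, amount, sourcePos uint64, controlProgram []byte) *types.TxInput {
	const stake = 100000000 // illustrative stake amount
	return types.NewDpos(
		nil,                // arguments: witness arguments, supplied at signing time
		"voter-address",    // from: the voting address
		"delegate-address", // to: the delegate being voted for
		sourceID, assetID, stake, amount, sourcePos, controlProgram,
		types.Delegate, // only Delegate currently sets the "vapor:1:event:vote" info string
	)
}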
+func (si *DposTx) InputType() uint8 { return DposInputType } diff --git a/protocol/bc/types/map.go b/protocol/bc/types/map.go index 9a3e11e7..e68f17ab 100644 --- a/protocol/bc/types/map.go +++ b/protocol/bc/types/map.go @@ -36,6 +36,15 @@ func MapTx(oldTx *TxData) *bc.Tx { ord = 0 case *bc.Claim: ord = 0 + case *bc.Dpos: + ord = e.Ordinal + //spentOutputIDs[*e.SpentOutputId] = true + /* + if *e.WitnessDestination.Value.AssetId == *consensus.BTMAssetID { + tx.GasInputIDs = append(tx.GasInputIDs, id) + } + */ + continue default: continue } @@ -62,6 +71,7 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash var ( spends []*bc.Spend + dposs []*bc.Dpos issuances []*bc.Issuance coinbase *bc.Coinbase claim *bc.Claim @@ -140,6 +150,26 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash Ref: &claimID, Value: &inp.AssetAmount, } + case *DposTx: + // create entry for prevout + prog := &bc.Program{VmVersion: inp.VMVersion, Code: inp.ControlProgram} + src := &bc.ValueSource{ + Ref: &inp.SourceID, + Value: &inp.AssetAmount, + Position: inp.SourcePosition, + } + prevout := bc.NewOutput(src, prog, 0) // ordinal doesn't matter for prevouts, only for result outputs + prevoutID := addEntry(prevout) + // create entry for dpos + dpos := bc.NewDpos(&prevoutID, uint64(i), uint32(inp.Type), inp.Stake, inp.From, inp.To, inp.Info) + dpos.WitnessArguments = inp.Arguments + dposID := addEntry(dpos) + // setup mux + muxSources[i] = &bc.ValueSource{ + Ref: &dposID, + Value: &inp.AssetAmount, + } + dposs = append(dposs, dpos) } } @@ -155,6 +185,12 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash issuance.SetDestination(&muxID, issuance.Value, issuance.Ordinal) } + // connect the inputs to the mux + for _, dpos := range dposs { + spentOutput := entryMap[*dpos.SpentOutputId].(*bc.Output) + dpos.SetDestination(&muxID, spentOutput.Source.Value, dpos.Ordinal) + } + if coinbase != nil { coinbase.SetDestination(&muxID, mux.Sources[0].Value, 0) } @@ -177,6 +213,7 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash r := bc.NewRetirement(src, uint64(i)) resultID = addEntry(r) } else { + // non-retirement prog := &bc.Program{out.VMVersion, out.ControlProgram} o := bc.NewOutput(src, prog, uint64(i)) @@ -197,7 +234,7 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash func mapBlockHeader(old *BlockHeader) (bc.Hash, *bc.BlockHeader) { proof := &bc.Proof{Sign: old.Proof.Sign, ControlProgram: old.Proof.ControlProgram} - bh := bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.Timestamp, &old.TransactionsMerkleRoot, &old.TransactionStatusHash, proof) + bh := bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.Timestamp, &old.TransactionsMerkleRoot, &old.TransactionStatusHash, proof, old.Extra, old.Coinbase) return bc.EntryID(bh), bh } diff --git a/protocol/bc/types/spend.go b/protocol/bc/types/spend.go index 8642d35a..01d00f37 100644 --- a/protocol/bc/types/spend.go +++ b/protocol/bc/types/spend.go @@ -4,6 +4,16 @@ import ( "github.com/vapor/protocol/bc" ) +type TxType uint8 + +const ( + Binary TxType = iota + LoginCandidate + LogoutCandidate + Delegate + UnDelegate +) + // SpendInput satisfies the TypedInput interface and represents a spend transaction. 
type SpendInput struct { SpendCommitmentSuffix []byte // The unconsumed suffix of the output commitment diff --git a/protocol/bc/types/txinput.go b/protocol/bc/types/txinput.go index 7ba0c90f..4fbcd188 100644 --- a/protocol/bc/types/txinput.go +++ b/protocol/bc/types/txinput.go @@ -15,6 +15,7 @@ const ( SpendInputType CoinbaseInputType ClainPeginInputType + DposInputType ) type ( @@ -48,6 +49,8 @@ func (t *TxInput) AssetAmount() bc.AssetAmount { return inp.AssetAmount case *ClaimInput: return inp.AssetAmount + case *DposTx: + return inp.AssetAmount } return bc.AssetAmount{} } @@ -61,6 +64,8 @@ func (t *TxInput) AssetID() bc.AssetID { return *inp.AssetId case *ClaimInput: return *inp.AssetId + case *DposTx: + return *inp.AssetId } return bc.AssetID{} @@ -75,6 +80,8 @@ func (t *TxInput) Amount() uint64 { return inp.Amount case *ClaimInput: return inp.Amount + case *DposTx: + return inp.Amount } return 0 } @@ -112,6 +119,8 @@ func (t *TxInput) Arguments() [][]byte { return inp.Arguments case *ClaimInput: return inp.Arguments + case *DposTx: + return inp.Arguments } return nil } @@ -125,6 +134,8 @@ func (t *TxInput) SetArguments(args [][]byte) { inp.Arguments = args case *ClaimInput: inp.Arguments = args + case *DposTx: + inp.Arguments = args } } @@ -186,7 +197,12 @@ func (t *TxInput) readFrom(r *blockchain.Reader) (err error) { if ci.Arbitrary, err = blockchain.ReadVarstr31(r); err != nil { return err } - + case DposInputType: + ci := new(DposTx) + t.TypedInput = ci + if ci.SpendCommitmentSuffix, err = ci.SpendCommitment.readFrom(r, 1); err != nil { + return err + } default: return fmt.Errorf("unsupported input type %d", icType[0]) } @@ -227,6 +243,40 @@ func (t *TxInput) readFrom(r *blockchain.Reader) (err error) { if inp.Arguments, err = blockchain.ReadVarstrList(r); err != nil { return err } + case *DposTx: + txType := uint64(0) + if txType, err = blockchain.ReadVarint63(r); err != nil { + return err + } + inp.Type = TxType(txType) + var from []byte + if from, err = blockchain.ReadVarstr31(r); err != nil { + return err + } + inp.From = string(from) + var to []byte + if to, err = blockchain.ReadVarstr31(r); err != nil { + return err + } + inp.To = string(to) + if inp.Amount, err = blockchain.ReadVarint63(r); err != nil { + return err + } + if inp.Stake, err = blockchain.ReadVarint63(r); err != nil { + return err + } + if inp.PaymentAmount, err = blockchain.ReadVarint63(r); err != nil { + return err + } + if inp.Arguments, err = blockchain.ReadVarstrList(r); err != nil { + return err + } + + var info []byte + if info, err = blockchain.ReadVarstr31(r); err != nil { + return err + } + inp.Info = string(info) } return nil }) @@ -287,6 +337,11 @@ func (t *TxInput) writeInputCommitment(w io.Writer) (err error) { if _, err = blockchain.WriteVarstr31(w, inp.Arbitrary); err != nil { return errors.Wrap(err, "writing coinbase arbitrary") } + case *DposTx: + if _, err = w.Write([]byte{DposInputType}); err != nil { + return err + } + return inp.SpendCommitment.writeExtensibleString(w, inp.SpendCommitmentSuffix, t.AssetVersion) } return nil } @@ -316,6 +371,31 @@ func (t *TxInput) writeInputWitness(w io.Writer) error { _, err := blockchain.WriteVarstrList(w, inp.Arguments) return err + case *DposTx: + if _, err := blockchain.WriteVarint63(w, uint64(inp.Type)); err != nil { + return err + } + if _, err := blockchain.WriteVarstr31(w, []byte(inp.From)); err != nil { + return err + } + if _, err := blockchain.WriteVarstr31(w, []byte(inp.To)); err != nil { + return err + } + if _, err := 
blockchain.WriteVarint63(w, inp.Amount); err != nil { + return err + } + if _, err := blockchain.WriteVarint63(w, inp.Stake); err != nil { + return err + } + if _, err := blockchain.WriteVarint63(w, inp.PaymentAmount); err != nil { + return err + } + if _, err := blockchain.WriteVarstrList(w, inp.Arguments); err != nil { + return err + } + _, err := blockchain.WriteVarstr31(w, []byte(inp.Info)) + + return err } return nil } diff --git a/protocol/block.go b/protocol/block.go index 2672acc0..1894f68b 100644 --- a/protocol/block.go +++ b/protocol/block.go @@ -158,7 +158,7 @@ func (c *Chain) saveBlock(block *types.Block) error { bcBlock := types.MapBlock(block) parent := c.index.GetNode(&block.PreviousBlockHash) - if err := validation.ValidateBlock(bcBlock, parent, block, c.Authoritys, c.position); err != nil { + if err := validation.ValidateBlock(bcBlock, parent, block, c, c.engine, c.Authoritys, c.position); err != nil { return errors.Sub(ErrBadBlock, err) } if err := c.store.SaveBlock(block, bcBlock.TransactionStatus); err != nil { diff --git a/protocol/protocol.go b/protocol/protocol.go index 45cefc89..abd6ddbd 100644 --- a/protocol/protocol.go +++ b/protocol/protocol.go @@ -6,6 +6,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/vapor/config" + engine "github.com/vapor/consensus/consensus" "github.com/vapor/errors" "github.com/vapor/protocol/bc" "github.com/vapor/protocol/bc/types" @@ -26,6 +27,7 @@ type Chain struct { bestNode *state.BlockNode Authoritys map[string]string position uint64 + engine engine.Engine } // NewChain returns a new Chain using store as the underlying storage. @@ -65,6 +67,14 @@ func (c *Chain) SetPosition(position uint64) { c.position = position } +func (c *Chain) GetAuthoritys(key string) string { + return c.Authoritys[key] +} + +func (c *Chain) SetConsensusEngine(engine engine.Engine) { + c.engine = engine +} + func (c *Chain) initChainStatus() error { genesisBlock := config.GenesisBlock() txStatus := bc.NewTransactionStatus() diff --git a/protocol/state/blockindex.go b/protocol/state/blockindex.go index 79fb25a0..0a610f27 100644 --- a/protocol/state/blockindex.go +++ b/protocol/state/blockindex.go @@ -33,6 +33,8 @@ type BlockNode struct { TransactionsMerkleRoot bc.Hash TransactionStatusHash bc.Hash Proof bc.Proof + Coinbase []byte + Extra []byte } func NewBlockNode(bh *types.BlockHeader, parent *BlockNode) (*BlockNode, error) { @@ -55,6 +57,8 @@ func NewBlockNode(bh *types.BlockHeader, parent *BlockNode) (*BlockNode, error) Sign: bh.Proof.Sign, ControlProgram: bh.Proof.ControlProgram, }, + Coinbase: bh.Coinbase, + Extra: bh.Extra, } /* if bh.Height == 0 { @@ -86,6 +90,8 @@ func (node *BlockNode) BlockHeader() *types.BlockHeader { TransactionsMerkleRoot: node.TransactionsMerkleRoot, TransactionStatusHash: node.TransactionStatusHash, }, + Coinbase: node.Coinbase, + Extra: node.Extra, } } diff --git a/protocol/store.go b/protocol/store.go index ba0e1389..915d0675 100644 --- a/protocol/store.go +++ b/protocol/store.go @@ -23,6 +23,9 @@ type Store interface { IsWithdrawSpent(hash *bc.Hash) bool SetWithdrawSpent(hash *bc.Hash) + + Set(hash *bc.Hash, data []byte) error + Get(hash *bc.Hash) ([]byte, error) } // BlockStoreState represents the core's db status diff --git a/protocol/txpool.go b/protocol/txpool.go index 05111716..cd7ebf14 100644 --- a/protocol/txpool.go +++ b/protocol/txpool.go @@ -203,7 +203,6 @@ func (tp *TxPool) ProcessTransaction(tx *types.Tx, statusFail bool, height, fee log.WithFields(log.Fields{"module": "ProcessTransaction", "error": 
"pegin-already-claimed"}).Error("ProcessTransaction error") return false, errors.New("pegin-already-claimed") } - txD := &TxDesc{ Tx: tx, StatusFail: statusFail, @@ -302,6 +301,7 @@ func (tp *TxPool) addTransaction(txD *TxDesc) error { atomic.StoreInt64(&tp.lastUpdated, time.Now().Unix()) tp.msgCh <- &TxPoolMsg{TxDesc: txD, MsgType: MsgNewTx} + log.WithField("tx_id", tx.ID.String()).Debug("Add tx to mempool") return nil } diff --git a/protocol/validation/block.go b/protocol/validation/block.go index 2f202607..e55e22c0 100644 --- a/protocol/validation/block.go +++ b/protocol/validation/block.go @@ -1,13 +1,13 @@ package validation import ( - "encoding/hex" "time" log "github.com/sirupsen/logrus" + "github.com/vapor/chain" "github.com/vapor/consensus" - "github.com/vapor/crypto/ed25519/chainkd" + engine "github.com/vapor/consensus/consensus" "github.com/vapor/errors" "github.com/vapor/protocol/bc" "github.com/vapor/protocol/bc/types" @@ -56,46 +56,47 @@ func checkCoinbaseAmount(b *bc.Block, amount uint64) error { } // ValidateBlockHeader check the block's header -func ValidateBlockHeader(b *bc.Block, parent *state.BlockNode) error { +func ValidateBlockHeader(b *bc.Block, block *types.Block, parent *state.BlockNode, c chain.Chain, engine engine.Engine) error { if b.Version < parent.Version { return errors.WithDetailf(errVersionRegression, "previous block verson %d, current block version %d", parent.Version, b.Version) } if b.Height != parent.Height+1 { return errors.WithDetailf(errMisorderedBlockHeight, "previous block height %d, current block height %d", parent.Height, b.Height) } - if b.Bits != parent.CalcNextBits() { - return errBadBits - } if parent.Hash != *b.PreviousBlockId { return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", parent.Hash.Bytes(), b.PreviousBlockId.Bytes()) } if err := checkBlockTime(b, parent); err != nil { return err } + if err := engine.VerifyHeader(c, &block.BlockHeader, false); err != nil { + return err + } + return nil } // ValidateBlock validates a block and the transactions within. 
diff --git a/protocol/validation/tx.go b/protocol/validation/tx.go
index 421cc8a4..fdb19ad9 100644
--- a/protocol/validation/tx.go
+++ b/protocol/validation/tx.go
@@ -148,6 +148,16 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
     case *bc.Mux:
         parity := make(map[bc.AssetID]int64)
         for i, src := range e.Sources {
+            e, ok := vs.tx.Entries[*src.Ref]
+            if !ok {
+                return errors.Wrapf(bc.ErrMissingEntry, "entry for bytom input %x not found", *src.Ref)
+            }
+            switch e.(type) {
+            case *bc.Dpos:
+                continue
+            default:
+            }
+
             if src.Value.Amount > math.MaxInt64 {
                 return errors.WithDetailf(ErrOverflow, "amount %d exceeds maximum value 2^63", src.Value.Amount)
             }
@@ -188,7 +198,6 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
             if !ok {
                 return errors.Wrapf(bc.ErrMissingEntry, "entry for bytom input %x not found", BTMInputID)
             }
-
             vs2 := *vs
             vs2.entryID = BTMInputID
             if err := checkValid(&vs2, e); err != nil {
@@ -260,7 +269,6 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
         if err != nil {
             return errors.Wrap(err, "getting spend prevout")
         }
-
         gasLeft, err := vm.Verify(NewTxVMContext(vs, e, spentOutput.ControlProgram, e.WitnessArguments), vs.gasStatus.GasLeft)
         if err != nil {
             return errors.Wrap(err, "checking control program")
@@ -353,6 +361,8 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
             return errors.Wrap(err, "checking spend destination")
         }
         vs.gasStatus.GasValid = true
+    case *bc.Dpos:
+        //fmt.Printf("kkkkkkkkkkkkkkkkkkkkkkkkkkk %T\n", e)
     default:
         return fmt.Errorf("entry has unexpected type %T", e)
     }
@@ -369,20 +379,6 @@ type MerkleBlock struct {
 }
 
 func IsValidPeginWitness(peginWitness [][]byte, prevout bc.Output) (err error) {
-    /*
-        assetID := bc.AssetID{}
-        assetID.V0 = prevout.Source.Value.AssetId.GetV0()
-        assetID.V1 = prevout.Source.Value.AssetId.GetV1()
-        assetID.V2 = prevout.Source.Value.AssetId.GetV2()
-        assetID.V3 = prevout.Source.Value.AssetId.GetV3()
-        //bytomPrevout.Source.Value.AssetId = &assetId
-
-        sourceID := bc.Hash{}
-        sourceID.V0 = prevout.Source.Ref.GetV0()
-        sourceID.V1 = prevout.Source.Ref.GetV1()
-        sourceID.V2 = prevout.Source.Ref.GetV2()
-        sourceID.V3 = prevout.Source.Ref.GetV3()
-    */
     assetAmount := &bc.AssetAmount{
         AssetId: prevout.Source.Value.AssetId,
@@ -407,11 +403,11 @@ func IsValidPeginWitness(peginWitness [][]byte, prevout bc.Output) (err error) {
     if !consensus.MoneyRange(amount) {
         return errors.New("Amount out of range")
     }
-
-    if len(peginWitness[1]) != 64 {
-        return errors.New("The length of gennesisBlockHash is not correct")
-    }
-
+    /*
+        if len(peginWitness[1]) != 32 {
+            return errors.New("The length of gennesisBlockHash is not correct")
+        }
+    */
     claimScript := peginWitness[2]
 
     rawTx := &bytomtypes.Tx{}
@@ -443,9 +439,10 @@ func IsValidPeginWitness(peginWitness [][]byte, prevout bc.Output) (err error) {
     if err = checkPeginTx(rawTx, bytomPrevout, amount, claimScript); err != nil {
         return err
     }
-
+    var b bc.Hash
+    b.UnmarshalText(peginWitness[1])
     // Check the genesis block corresponds to a valid peg (only one for now)
-    if !bytes.Equal(peginWitness[1], []byte(consensus.ActiveNetParams.ParentGenesisBlockHash)) {
+    if b.String() != consensus.ActiveNetParams.ParentGenesisBlockHash {
         return errors.New("ParentGenesisBlockHash don't match")
     }
     // TODO Finally, validate peg-in via rpc call
@@ -536,6 +533,11 @@ func checkValidSrc(vstate *validationState, vs *bc.ValueSource) error {
             return errors.Wrapf(ErrPosition, "invalid position %d for coinbase source", vs.Position)
         }
         dest = ref.WitnessDestination
+    case *bc.Dpos:
+        if vs.Position != 0 {
+            return errors.Wrapf(ErrPosition, "invalid position %d for coinbase source", vs.Position)
+        }
+        dest = ref.WitnessDestination
     default:
         return errors.Wrapf(bc.ErrEntryType, "value source is %T, should be coinbase, issuance, spend, or mux", e)
     }
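
The peg-in check above no longer compares raw witness bytes against ParentGenesisBlockHash; it decodes witness item 1 into a bc.Hash and compares the canonical string form. A small self-contained helper showing the same conversion (the function name and surrounding code are illustrative, not part of the commit):

    package peginexample

    import (
        "github.com/vapor/consensus"
        "github.com/vapor/protocol/bc"
    )

    // matchesParentGenesis mirrors the comparison introduced in
    // IsValidPeginWitness: decode the witness item as a hash, then compare
    // its string form with the configured parent genesis block hash.
    func matchesParentGenesis(witnessItem []byte) (bool, error) {
        var h bc.Hash
        if err := h.UnmarshalText(witnessItem); err != nil {
            return false, err
        }
        return h.String() == consensus.ActiveNetParams.ParentGenesisBlockHash, nil
    }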
diff --git a/protocol/vm/bitwise.go b/protocol/vm/bitwise.go
index 1d20dcf3..853e6469 100644
--- a/protocol/vm/bitwise.go
+++ b/protocol/vm/bitwise.go
@@ -1,6 +1,8 @@
 package vm
 
-import "bytes"
+import (
+    "bytes"
+)
 
 func opInvert(vm *virtualMachine) error {
     err := vm.applyCost(1)
diff --git a/protocol/vm/vm.go b/protocol/vm/vm.go
index f7b5a539..38afaa63 100644
--- a/protocol/vm/vm.go
+++ b/protocol/vm/vm.go
@@ -132,7 +132,6 @@ func (vm *virtualMachine) step() error {
             fmt.Fprintf(TraceOut, " stack %d: %x\n", len(vm.dataStack)-1-i, vm.dataStack[i])
         }
     }
-
     return nil
 }
diff --git a/wallet/indexer.go b/wallet/indexer.go
index 4c286797..8ae4bacc 100644
--- a/wallet/indexer.go
+++ b/wallet/indexer.go
@@ -1,6 +1,7 @@
 package wallet
 
 import (
+    "bytes"
     "encoding/json"
     "fmt"
     "sort"
@@ -15,6 +16,7 @@ import (
     chainjson "github.com/vapor/encoding/json"
     "github.com/vapor/protocol/bc"
     "github.com/vapor/protocol/bc/types"
+    "github.com/vapor/protocol/vm/vmutil"
 )
 
 const (
@@ -119,11 +121,18 @@ func (w *Wallet) indexTransactions(batch db.Batch, b *types.Block, txStatus *bc.
 // filterAccountTxs related and build the fully annotated transactions.
 func (w *Wallet) filterAccountTxs(b *types.Block, txStatus *bc.TransactionStatus) []*query.AnnotatedTx {
     annotatedTxs := make([]*query.AnnotatedTx, 0, len(b.Transactions))
-
+    redeemContract := w.dposAddress.ScriptAddress()
+    program, _ := vmutil.P2WPKHProgram(redeemContract)
 transactionLoop:
     for pos, tx := range b.Transactions {
         statusFail, _ := txStatus.GetStatus(pos)
         for _, v := range tx.Outputs {
+
+            if bytes.Equal(v.ControlProgram, program) {
+                annotatedTxs = append(annotatedTxs, w.buildAnnotatedTransaction(tx, b, statusFail, pos))
+                continue transactionLoop
+            }
+
             var hash [32]byte
             sha3pool.Sum256(hash[:], v.ControlProgram)
             if bytes := w.DB.Get(account.ContractKey(hash)); bytes != nil {
@@ -133,6 +142,11 @@ transactionLoop:
         }
 
         for _, v := range tx.Inputs {
+            if bytes.Equal(v.ControlProgram(), program) {
+                annotatedTxs = append(annotatedTxs, w.buildAnnotatedTransaction(tx, b, statusFail, pos))
+                continue transactionLoop
+            }
+
             outid, err := v.SpentOutputID()
             if err != nil {
                 continue
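
filterAccountTxs (and filterAccountUtxo in wallet/utxo.go below) identify dpos-related outputs by comparing each control program against the P2WPKH program of the wallet's configured dpos address. The derivation is the same one used above; here it is pulled out as a standalone sketch, assuming the dpos address is a P2WPKH address as the wallet code does (the address value is a placeholder):

    package walletexample

    import (
        "github.com/vapor/common"
        "github.com/vapor/consensus"
        "github.com/vapor/protocol/vm/vmutil"
    )

    // dposControlProgram rebuilds the comparison key used by the wallet:
    // decode the configured dpos address, take its witness program, and wrap
    // it in a P2WPKH control program.
    func dposControlProgram(dposAddr string) ([]byte, error) {
        address, err := common.DecodeAddress(dposAddr, &consensus.ActiveNetParams)
        if err != nil {
            return nil, err
        }
        return vmutil.P2WPKHProgram(address.ScriptAddress())
    }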
diff --git a/wallet/utxo.go b/wallet/utxo.go
index ab9fa1ea..a2ffb92e 100644
--- a/wallet/utxo.go
+++ b/wallet/utxo.go
@@ -2,6 +2,7 @@ package wallet
 
 import (
     "encoding/json"
+    "fmt"
 
     log "github.com/sirupsen/logrus"
     "github.com/tendermint/tmlibs/db"
@@ -13,6 +14,7 @@ import (
     "github.com/vapor/errors"
     "github.com/vapor/protocol/bc"
     "github.com/vapor/protocol/bc/types"
+    "github.com/vapor/protocol/vm/vmutil"
 )
 
 // GetAccountUtxos return all account unspent outputs
@@ -45,6 +47,23 @@ func (w *Wallet) GetAccountUtxos(accountID string, id string, unconfirmed, isSma
 }
 
 func (w *Wallet) attachUtxos(batch db.Batch, b *types.Block, txStatus *bc.TransactionStatus) {
+    /*
+        a := bc.Hash{}
+        a.UnmarshalText([]byte("bef9c83e5cadc6dbb80b81387f3e3c3fadd76b917e5337f5442b9ef071c06526"))
+        batch.Delete(account.StandardUTXOKey(a))
+        a.UnmarshalText([]byte("1a5e2141a12823dabf343b5ace0a181a3d018e24f3dc6e7c3704b66fc040ca7b"))
+        batch.Delete(account.StandardUTXOKey(a))
+        a.UnmarshalText([]byte("4647b1e0893f56438f9bbde6134840f1595da799cfc6ece77c4d9aabdf9cfe50"))
+        batch.Delete(account.StandardUTXOKey(a))
+        a.UnmarshalText([]byte("928094d14b00aaf674ee291bbfb0c843a4dab53984f6235b998338fe0fa2d688"))
+        batch.Delete(account.StandardUTXOKey(a))
+        a.UnmarshalText([]byte("e20aee90018f8b6483d5590786fcf495bccfa7f1a3a5a5a9106c4143f71d49a4"))
+        batch.Delete(account.StandardUTXOKey(a))
+        a.UnmarshalText([]byte("2cb18fe2dd3eb8dcf2df43aa6650851dd0b6de291bfffd151a36703c92f8e864"))
+        batch.Delete(account.StandardUTXOKey(a))
+        a.UnmarshalText([]byte("48d71e6da11de69983b0cc79787f0f9422a144c94e687dfec11b4a57fdca2832"))
+        batch.Delete(account.StandardUTXOKey(a))
+    */
     for txIndex, tx := range b.Transactions {
         statusFail, err := txStatus.GetStatus(txIndex)
         if err != nil {
@@ -108,6 +127,9 @@ func (w *Wallet) detachUtxos(batch db.Batch, b *types.Block, txStatus *bc.Transa
 
 func (w *Wallet) filterAccountUtxo(utxos []*account.UTXO) []*account.UTXO {
     outsByScript := make(map[string][]*account.UTXO, len(utxos))
+    redeemContract := w.dposAddress.ScriptAddress()
+    program, _ := vmutil.P2WPKHProgram(redeemContract)
+    isDposAddress := false
     for _, utxo := range utxos {
         scriptStr := string(utxo.ControlProgram)
         outsByScript[scriptStr] = append(outsByScript[scriptStr], utxo)
@@ -126,21 +148,33 @@ func (w *Wallet) filterAccountUtxo(utxos []*account.UTXO) []*account.UTXO {
         sha3pool.Sum256(hash[:], []byte(s))
         data := w.DB.Get(account.ContractKey(hash))
         if data == nil {
-            continue
-        }
+            if s == string(program) {
+                isDposAddress = true
+            } else {
+                continue
+            }
 
-        cp := &account.CtrlProgram{}
-        if err := json.Unmarshal(data, cp); err != nil {
-            log.WithField("err", err).Error("filterAccountUtxo fail on unmarshal control program")
-            continue
         }
-        for _, utxo := range outsByScript[s] {
-            utxo.AccountID = cp.AccountID
-            utxo.Address = cp.Address
-            utxo.ControlProgramIndex = cp.KeyIndex
-            utxo.Change = cp.Change
-            result = append(result, utxo)
+        if !isDposAddress {
+            cp := &account.CtrlProgram{}
+            if err := json.Unmarshal(data, cp); err != nil {
+                log.WithField("err", err).Error("filterAccountUtxo fail on unmarshal control program")
+                continue
+            }
+            for _, utxo := range outsByScript[s] {
+                utxo.AccountID = cp.AccountID
+                utxo.Address = cp.Address
+                utxo.ControlProgramIndex = cp.KeyIndex
+                utxo.Change = cp.Change
+                result = append(result, utxo)
+            }
+        } else {
+            for _, utxo := range outsByScript[s] {
+                utxo.Address = w.dposAddress.EncodeAddress()
+                result = append(result, utxo)
+            }
+            isDposAddress = false
         }
     }
     return result
@@ -169,7 +203,6 @@ func txInToUtxos(tx *types.Tx, statusFail bool) []*account.UTXO {
         if err != nil {
             continue
         }
-
         resOut, err := tx.Output(*sp.SpentOutputId)
         if err != nil {
             log.WithField("err", err).Error("txInToUtxos fail on get resOut")
@@ -177,6 +210,7 @@
         }
 
         if statusFail && *resOut.Source.Value.AssetId != *consensus.BTMAssetID {
+            fmt.Println("statusFail:", statusFail)
             continue
         }
diff --git a/wallet/wallet.go b/wallet/wallet.go
index 8aa779a2..3f764185 100644
--- a/wallet/wallet.go
+++ b/wallet/wallet.go
@@ -10,6 +10,7 @@ import (
     "github.com/vapor/account"
     "github.com/vapor/asset"
     "github.com/vapor/blockchain/pseudohsm"
+    "github.com/vapor/common"
     "github.com/vapor/protocol"
     "github.com/vapor/protocol/bc"
     "github.com/vapor/protocol/bc/types"
@@ -41,10 +42,11 @@ type Wallet struct {
     chain       *protocol.Chain
     RecoveryMgr *recoveryManager
     rescanCh    chan struct{}
+    dposAddress common.Address
 }
 
 //NewWallet return a new wallet instance
-func NewWallet(walletDB db.DB, account *account.Manager, asset *asset.Registry, hsm *pseudohsm.HSM, chain *protocol.Chain) (*Wallet, error) {
+func NewWallet(walletDB db.DB, account *account.Manager, asset *asset.Registry, hsm *pseudohsm.HSM, chain *protocol.Chain, dposAddress common.Address) (*Wallet, error) {
     w := &Wallet{
         DB:          walletDB,
         AccountMgr:  account,
@@ -53,6 +55,7 @@ func NewWallet(walletDB db.DB, account *account.Manager, asset *asset.Registry,
         Hsm:         hsm,
         RecoveryMgr: newRecoveryManager(walletDB, account),
         rescanCh:    make(chan struct{}, 1),
+        dposAddress: dposAddress,
     }
 
     if err := w.loadWalletInfo(); err != nil {
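
NewWallet now requires the dpos address alongside the existing dependencies. A sketch of the updated call, assuming the address string comes from node configuration (the config field and function names here are illustrative; this commit only changes the constructor signature):

    package nodeexample

    import (
        "github.com/tendermint/tmlibs/db"

        "github.com/vapor/account"
        "github.com/vapor/asset"
        "github.com/vapor/blockchain/pseudohsm"
        "github.com/vapor/common"
        "github.com/vapor/consensus"
        "github.com/vapor/protocol"
        "github.com/vapor/wallet"
    )

    // buildWallet decodes the configured dpos address and passes it to the
    // updated wallet constructor. Where dposAddrStr comes from is up to the
    // node wiring.
    func buildWallet(walletDB db.DB, accounts *account.Manager, assets *asset.Registry,
        hsm *pseudohsm.HSM, chain *protocol.Chain, dposAddrStr string) (*wallet.Wallet, error) {

        dposAddress, err := common.DecodeAddress(dposAddrStr, &consensus.ActiveNetParams)
        if err != nil {
            return nil, err
        }
        return wallet.NewWallet(walletDB, accounts, assets, hsm, chain, dposAddress)
    }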