--- /dev/null
+package account
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/vapor/config"
+
+ "github.com/vapor/blockchain/txbuilder"
+ "github.com/vapor/common"
+ "github.com/vapor/consensus"
+ "github.com/vapor/crypto/ed25519/chainkd"
+ "github.com/vapor/errors"
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+ "github.com/vapor/protocol/vm/vmutil"
+)
+
+// DecodeDposAction deserializes raw JSON into a DopsAction bound to this
+// account manager, satisfying the txbuilder action-decoder signature.
+func (m *Manager) DecodeDposAction(data []byte) (txbuilder.Action, error) {
+	action := &DopsAction{Accounts: m}
+	if err := json.Unmarshal(data, action); err != nil {
+		return action, err
+	}
+	return action, nil
+}
+
+// DopsAction is the txbuilder action that delegates (stakes) funds from one
+// address to another via a dpos transaction.
+// NOTE(review): the name looks like a typo of "DposAction"; kept unchanged
+// because external callers (e.g. the api package) reference this spelling.
+type DopsAction struct {
+	Accounts *Manager // account manager used to reserve UTXOs
+	bc.AssetAmount // embedded asset id and staked amount (populated from JSON)
+	From string `json:"from"` // delegator address whose UTXOs are spent
+	To string `json:"to"` // delegatee address
+	Fee uint64 `json:"fee"` // fee amount, reserved separately from the stake
+	UseUnconfirmed bool `json:"use_unconfirmed"` // allow selecting unconfirmed UTXOs
+}
+
+// Build assembles the dpos transaction: it validates the action fields,
+// reserves UTXOs for the staked amount and (separately) for the fee, adds
+// the corresponding inputs, and returns any fee change to the sender.
+func (a *DopsAction) Build(ctx context.Context, b *txbuilder.TemplateBuilder) error {
+	var missing []string
+
+	if a.AssetId.IsZero() {
+		missing = append(missing, "asset_id")
+	}
+	if a.From == "" {
+		missing = append(missing, "from")
+	}
+	if a.To == "" {
+		missing = append(missing, "to")
+	}
+	if len(missing) > 0 {
+		return txbuilder.MissingFieldsError(missing...)
+	}
+
+	// Reserve the staked amount; these UTXOs become the dpos inputs.
+	res, err := a.Accounts.utxoKeeper.ReserveByAddress(a.From, a.AssetId, a.Amount, a.UseUnconfirmed, false)
+	if err != nil {
+		return errors.Wrap(err, "reserving utxos")
+	}
+
+	// Cancel the reservation if the build gets rolled back.
+	b.OnRollback(func() { a.Accounts.utxoKeeper.Cancel(res.id) })
+	for _, r := range res.utxos {
+		txInput, sigInst, err := DposTx(a.From, a.To, a.Amount, r)
+		if err != nil {
+			return errors.Wrap(err, "creating inputs")
+		}
+		if err = b.AddInput(txInput, sigInst); err != nil {
+			return errors.Wrap(err, "adding inputs")
+		}
+	}
+
+	// Reserve the fee separately; isReserved=true pins these UTXOs so a
+	// concurrent build cannot select them again.
+	res, err = a.Accounts.utxoKeeper.ReserveByAddress(a.From, a.AssetId, a.Fee, a.UseUnconfirmed, true)
+	if err != nil {
+		return errors.Wrap(err, "reserving utxos")
+	}
+
+	// Cancel the reservation if the build gets rolled back.
+	b.OnRollback(func() { a.Accounts.utxoKeeper.Cancel(res.id) })
+	for _, r := range res.utxos {
+		txSpendInput, sigInst, err := spendInput(r)
+		if err != nil {
+			return errors.Wrap(err, "creating inputs")
+		}
+		if err = b.AddInput(txSpendInput, sigInst); err != nil {
+			return errors.Wrap(err, "adding inputs")
+		}
+	}
+
+	// res.change is unsigned, so the previous `>= 0` check was always true
+	// and could emit a zero-amount change output; only add change when
+	// there actually is some.
+	if res.change > 0 {
+		address, err := common.DecodeAddress(a.From, &consensus.ActiveNetParams)
+		if err != nil {
+			return err
+		}
+		redeemContract := address.ScriptAddress()
+		program, err := vmutil.P2WPKHProgram(redeemContract)
+		if err != nil {
+			return err
+		}
+		if err = b.AddOutput(types.NewTxOutput(*consensus.BTMAssetID, res.change, program)); err != nil {
+			return errors.Wrap(err, "adding change output")
+		}
+	}
+
+	return nil
+}
+
+func (a *DopsAction) ActionType() string {
+ return "dpos"
+}
+
+// DposTx converts a UTXO into a dpos (delegate) tx input plus the signing
+// instruction required to authorize it with the consensus dpos key.
+// (The previous doc comment referred to a nonexistent "DposInputs".)
+func DposTx(from, to string, stake uint64, u *UTXO) (*types.TxInput, *txbuilder.SigningInstruction, error) {
+	txInput := types.NewDpos(nil, from, to, u.SourceID, u.AssetID, stake, u.Amount, u.SourcePos, u.ControlProgram, types.Delegate)
+	sigInst := &txbuilder.SigningInstruction{}
+
+	var xprv chainkd.XPrv
+	// The unmarshal error was previously ignored, which would silently
+	// leave xprv zeroed and produce signatures from a useless key.
+	if err := xprv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)); err != nil {
+		return nil, nil, err
+	}
+	xpubs := []chainkd.XPub{xprv.XPub()}
+	quorum := len(xpubs)
+	if u.Address == "" {
+		sigInst.AddWitnessKeysWithOutPath(xpubs, quorum)
+		return txInput, sigInst, nil
+	}
+
+	address, err := common.DecodeAddress(u.Address, &consensus.ActiveNetParams)
+	if err != nil {
+		return nil, nil, err
+	}
+	sigInst.AddRawWitnessKeysWithoutPath(xpubs, quorum)
+	switch address.(type) {
+	case *common.AddressWitnessPubKeyHash:
+		derivedPK := xpubs[0].PublicKey()
+		sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness([]byte(derivedPK)))
+
+	case *common.AddressWitnessScriptHash:
+		derivedPKs := chainkd.XPubKeys(xpubs)
+		script, err := vmutil.P2SPMultiSigProgram(derivedPKs, quorum)
+		if err != nil {
+			return nil, nil, err
+		}
+		sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness(script))
+
+	default:
+		return nil, nil, errors.New("unsupported address type")
+	}
+
+	return txInput, sigInst, nil
+}
+
+// spendInput converts a UTXO into an ordinary spend tx input plus the
+// signing instruction required to authorize it with the consensus dpos key.
+func spendInput(u *UTXO) (*types.TxInput, *txbuilder.SigningInstruction, error) {
+	txSpendInput := types.NewSpendInput(nil, u.SourceID, u.AssetID, u.Amount, u.SourcePos, u.ControlProgram)
+	sigInst := &txbuilder.SigningInstruction{}
+
+	var xprv chainkd.XPrv
+	// The unmarshal error was previously ignored, which would silently
+	// leave xprv zeroed and produce signatures from a useless key.
+	if err := xprv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)); err != nil {
+		return nil, nil, err
+	}
+	xpubs := []chainkd.XPub{xprv.XPub()}
+	quorum := len(xpubs)
+	if u.Address == "" {
+		sigInst.AddWitnessKeysWithOutPath(xpubs, quorum)
+		return txSpendInput, sigInst, nil
+	}
+
+	address, err := common.DecodeAddress(u.Address, &consensus.ActiveNetParams)
+	if err != nil {
+		return nil, nil, err
+	}
+	sigInst.AddRawWitnessKeysWithoutPath(xpubs, quorum)
+	switch address.(type) {
+	case *common.AddressWitnessPubKeyHash:
+		derivedPK := xpubs[0].PublicKey()
+		sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness([]byte(derivedPK)))
+
+	case *common.AddressWitnessScriptHash:
+		derivedPKs := chainkd.XPubKeys(xpubs)
+		script, err := vmutil.P2SPMultiSigProgram(derivedPKs, quorum)
+		if err != nil {
+			return nil, nil, err
+		}
+		sigInst.WitnessComponents = append(sigInst.WitnessComponents, txbuilder.DataWitness(script))
+
+	default:
+		return nil, nil, errors.New("unsupported address type")
+	}
+
+	return txSpendInput, sigInst, nil
+}
return result, nil
}
+// ReserveByAddress reserves UTXOs owned by `address` covering at least
+// `amount` of `assetID` and records a reservation with the selected UTXOs
+// and the excess (change). When isReserved is true the chosen UTXOs are
+// additionally pinned in uk.reserved so later reservations cannot pick them.
+// NOTE(review): unlike the keeper's other reservations this one carries no
+// expiration, so it is only released via Cancel — confirm every caller
+// registers a rollback that cancels it.
+func (uk *utxoKeeper) ReserveByAddress(address string, assetID *bc.AssetID, amount uint64, useUnconfirmed bool, isReserved bool) (*reservation, error) {
+	uk.mtx.Lock()
+	defer uk.mtx.Unlock()
+
+	utxos, immatureAmount := uk.findUtxosByAddress(address, assetID, useUnconfirmed)
+	optUtxos, optAmount, reservedAmount := uk.optUTXOs(utxos, amount)
+	// Report the most specific shortfall reason:
+	// not enough even counting immature and already-reserved funds.
+	if optAmount+reservedAmount+immatureAmount < amount {
+		return nil, ErrInsufficient
+	}
+	// Enough in total, but part of it has not matured yet.
+	if optAmount+reservedAmount < amount {
+		return nil, ErrImmature
+	}
+	// Enough mature funds, but some are held by other reservations.
+	if optAmount < amount {
+		return nil, ErrReserved
+	}
+
+	result := &reservation{
+		id: atomic.AddUint64(&uk.nextIndex, 1),
+		utxos: optUtxos,
+		change: optAmount - amount,
+	}
+
+	uk.reservations[result.id] = result
+	if isReserved {
+		// Pin each UTXO to this reservation id.
+		for _, u := range optUtxos {
+			uk.reserved[u.OutputID] = result.id
+		}
+	}
+
+	return result, nil
+}
+
func (uk *utxoKeeper) ReserveParticular(outHash bc.Hash, useUnconfirmed bool, exp time.Time) (*reservation, error) {
uk.mtx.Lock()
defer uk.mtx.Unlock()
return utxos, immatureAmount
}
+// findUtxosByAddress collects the stored (and, optionally, unconfirmed)
+// UTXOs belonging to the given address and asset. It returns the spendable
+// UTXOs together with the total amount that is still immature at the
+// current height.
+func (uk *utxoKeeper) findUtxosByAddress(address string, assetID *bc.AssetID, useUnconfirmed bool) ([]*UTXO, uint64) {
+	utxos := []*UTXO{}
+	immatureAmount := uint64(0)
+	height := uk.currentHeight()
+
+	collect := func(u *UTXO) {
+		if u.Address != address || u.AssetID != *assetID {
+			return
+		}
+		if u.ValidHeight > height {
+			// Not yet spendable; only account for its amount.
+			immatureAmount += u.Amount
+			return
+		}
+		utxos = append(utxos, u)
+	}
+
+	utxoIter := uk.db.IteratorPrefix([]byte(UTXOPreFix))
+	defer utxoIter.Release()
+	for utxoIter.Next() {
+		u := &UTXO{}
+		if err := json.Unmarshal(utxoIter.Value(), u); err != nil {
+			log.WithField("err", err).Error("utxoKeeper findUtxos fail on unmarshal utxo")
+			continue
+		}
+		collect(u)
+	}
+
+	if useUnconfirmed {
+		for _, u := range uk.unconfirmed {
+			collect(u)
+		}
+	}
+	return utxos, immatureAmount
+}
+
func (uk *utxoKeeper) findUtxo(outHash bc.Hash, useUnconfirmed bool) (*UTXO, error) {
if u, ok := uk.unconfirmed[outHash]; useUnconfirmed && ok {
return u, nil
"github.com/vapor/dashboard/dashboard"
"github.com/vapor/dashboard/equity"
"github.com/vapor/errors"
- "github.com/vapor/mining/cpuminer"
+ "github.com/vapor/mining/miner"
"github.com/vapor/mining/miningpool"
"github.com/vapor/net/http/authn"
"github.com/vapor/net/http/gzip"
// API is the scheduling center for server
type API struct {
- sync *netsync.SyncManager
- wallet *wallet.Wallet
- accessTokens *accesstoken.CredentialStore
- chain *protocol.Chain
- server *http.Server
- handler http.Handler
- txFeedTracker *txfeed.Tracker
- cpuMiner *cpuminer.CPUMiner
+ sync *netsync.SyncManager
+ wallet *wallet.Wallet
+ accessTokens *accesstoken.CredentialStore
+ chain *protocol.Chain
+ server *http.Server
+ handler http.Handler
+ txFeedTracker *txfeed.Tracker
+ //cpuMiner *cpuminer.CPUMiner
+ miner *miner.Miner
miningPool *miningpool.MiningPool
notificationMgr *websocket.WSNotificationManager
newBlockCh chan *bc.Hash
}
// NewAPI create and initialize the API
-func NewAPI(sync *netsync.SyncManager, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, cpuMiner *cpuminer.CPUMiner, miningPool *miningpool.MiningPool, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, newBlockCh chan *bc.Hash, notificationMgr *websocket.WSNotificationManager) *API {
+func NewAPI(sync *netsync.SyncManager, wallet *wallet.Wallet, txfeeds *txfeed.Tracker, miner *miner.Miner, miningPool *miningpool.MiningPool, chain *protocol.Chain, config *cfg.Config, token *accesstoken.CredentialStore, newBlockCh chan *bc.Hash, notificationMgr *websocket.WSNotificationManager) *API {
api := &API{
sync: sync,
wallet: wallet,
chain: chain,
accessTokens: token,
txFeedTracker: txfeeds,
- cpuMiner: cpuMiner,
+ miner: miner,
miningPool: miningPool,
newBlockCh: newBlockCh,
m.Handle("/get-side-raw-transaction", jsonHandler(a.getSideRawTransaction))
m.Handle("/build-mainchain-tx", jsonHandler(a.buildMainChainTxForContract))
m.Handle("/sign-with-key", jsonHandler(a.signWithKey))
+ m.Handle("/dpos", jsonHandler(a.dpos))
} else {
log.Warn("Please enable wallet")
}
 	// Use the output as a transaction input to build a new transaction
builder := txbuilder.NewBuilder(time.Now())
 	// TODO: generate a UTXO from the raw tx
- //txInput := types.NewClaimInputInput(nil, *ins.RawTx.Outputs[nOut].AssetId, ins.RawTx.Outputs[nOut].Amount, cp.ControlProgram)
sourceID := *ins.RawTx.OutputID(nOut)
outputAccount := ins.RawTx.Outputs[nOut].Amount
assetID := *ins.RawTx.Outputs[nOut].AssetId
- txInput := types.NewClaimInputInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram)
+ txInput := types.NewClaimInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram)
if err := builder.AddInput(txInput, &txbuilder.SigningInstruction{}); err != nil {
return nil, err
}
 	// Use the output as a transaction input to build a new transaction
builder := txbuilder.NewBuilder(time.Now())
 	// TODO: generate a UTXO from the raw tx
- //txInput := types.NewClaimInputInput(nil, *ins.RawTx.Outputs[nOut].AssetId, ins.RawTx.Outputs[nOut].Amount, cp.ControlProgram)
sourceID := *ins.RawTx.OutputID(nOut)
outputAccount := ins.RawTx.Outputs[nOut].Amount
assetID := *ins.RawTx.Outputs[nOut].AssetId
- txInput := types.NewClaimInputInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram)
+ txInput := types.NewClaimInput(nil, sourceID, assetID, outputAccount, uint64(nOut), cp.ControlProgram)
if err := builder.AddInput(txInput, &txbuilder.SigningInstruction{}); err != nil {
return nil, err
}
--- /dev/null
+package api
+
+import (
+ "context"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/vapor/account"
+ "github.com/vapor/blockchain/txbuilder"
+ "github.com/vapor/config"
+ "github.com/vapor/crypto/ed25519/chainkd"
+ "github.com/vapor/errors"
+ "github.com/vapor/protocol/bc"
+)
+
+// dpos builds, signs and submits a dpos transaction that delegates
+// ins.Stake (plus ins.Fee) from the configured coinbase address to ins.To.
+func (a *API) dpos(ctx context.Context, ins struct {
+	To string `json:"to"`
+	Fee uint64 `json:"fee"`
+	Stake uint64 `json:"stake"`
+	TxType uint8 `json:"tx_type"`
+}) Response {
+	// The dpos asset is the all-ones asset id (same value as BTMAssetID).
+	// The unmarshal error was previously ignored.
+	var assetID bc.AssetID
+	if err := assetID.UnmarshalText([]byte("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")); err != nil {
+		return NewErrorResponse(err)
+	}
+
+	// Assemble the dpos action and build the transaction template.
+	dpos := account.DopsAction{
+		Accounts: a.wallet.AccountMgr,
+		From: config.CommonConfig.Consensus.Dpos.Coinbase,
+		To: ins.To,
+		Fee: ins.Fee,
+	}
+	dpos.Amount = ins.Stake
+	dpos.AssetId = &assetID
+	builder := txbuilder.NewBuilder(time.Now())
+	if err := dpos.Build(ctx, builder); err != nil {
+		return NewErrorResponse(err)
+	}
+
+	tmpl, _, err := builder.Build()
+	if err != nil {
+		return NewErrorResponse(err)
+	}
+
+	// Sign with the consensus dpos key; fail fast on a malformed key
+	// instead of signing with a zeroed one.
+	var xprv chainkd.XPrv
+	if err := xprv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv)); err != nil {
+		return NewErrorResponse(err)
+	}
+	if err := signWithKey(tmpl, xprv); err != nil {
+		return NewErrorResponse(err)
+	}
+	log.Info("Sign Transaction complete.")
+	log.Info(txbuilder.SignProgress(tmpl))
+
+	// Submit the finalized transaction to the chain.
+	if err := txbuilder.FinalizeTx(ctx, a.chain, tmpl.Transaction); err != nil {
+		return NewErrorResponse(err)
+	}
+
+	log.WithField("tx_id", tmpl.Transaction.ID.String()).Info("submit single tx")
+	return NewSuccessResponse(&submitTxResp{TxID: &tmpl.Transaction.ID})
+}
+
+// signWithKey walks every witness component of the template and lets the
+// given root key sign the kinds that support direct-key signing
+// (SignatureWitness and RawTxSigWitness), then materializes the witnesses
+// into the transaction's input arguments.
+func signWithKey(tmpl *txbuilder.Template, xprv chainkd.XPrv) error {
+	for i, sigInst := range tmpl.SigningInstructions {
+		for j, wc := range sigInst.WitnessComponents {
+			switch sw := wc.(type) {
+			case *txbuilder.SignatureWitness:
+				err := sw.Sign(tmpl, uint32(i), xprv)
+				if err != nil {
+					return errors.WithDetailf(err, "adding signature(s) to signature witness component %d of input %d", j, i)
+				}
+			case *txbuilder.RawTxSigWitness:
+				err := sw.Sign(tmpl, uint32(i), xprv)
+				if err != nil {
+					return errors.WithDetailf(err, "adding signature(s) to raw-signature witness component %d of input %d", j, i)
+				}
+			}
+		}
+	}
+	// Fill the tx input arguments from the now-signed witness components.
+	return materializeWitnessesWithKey(tmpl)
+}
+
+// materializeWitnessesWithKey turns each signing instruction's witness
+// components into concrete input arguments on the template's transaction.
+// It mirrors txbuilder's materializeWitnesses but uses the exported
+// Materialize method.
+func materializeWitnessesWithKey(txTemplate *txbuilder.Template) error {
+	msg := txTemplate.Transaction
+
+	if msg == nil {
+		return errors.Wrap(txbuilder.ErrMissingRawTx)
+	}
+
+	// There may be fewer instructions than inputs, but never more.
+	if len(txTemplate.SigningInstructions) > len(msg.Inputs) {
+		return errors.Wrap(txbuilder.ErrBadInstructionCount)
+	}
+
+	for i, sigInst := range txTemplate.SigningInstructions {
+		if msg.Inputs[sigInst.Position] == nil {
+			return errors.WithDetailf(txbuilder.ErrBadTxInputIdx, "signing instruction %d references missing tx input %d", i, sigInst.Position)
+		}
+
+		// Collect the witness arguments produced by each component in order.
+		var witness [][]byte
+		for j, wc := range sigInst.WitnessComponents {
+			err := wc.Materialize(&witness)
+			if err != nil {
+				return errors.WithDetailf(err, "error in witness component %d of input %d", j, i)
+			}
+		}
+		msg.SetInputArguments(sigInst.Position, witness)
+	}
+
+	return nil
+}
log "github.com/sirupsen/logrus"
"github.com/vapor/blockchain/txbuilder"
+ "github.com/vapor/common"
+ "github.com/vapor/consensus"
+ "github.com/vapor/crypto"
"github.com/vapor/crypto/ed25519/chainkd"
)
}
type keyPair struct {
- Xpub chainkd.XPub `json:"xpub"`
- Xprv chainkd.XPrv `json:"xprv"`
+ Xpub chainkd.XPub `json:"xpub"`
+ Xprv chainkd.XPrv `json:"xprv"`
+ Address string `json:"address,omitempty"`
}
func (a *API) createXKeys(ctx context.Context) Response {
if err != nil {
return NewErrorResponse(err)
}
- return NewSuccessResponse(&keyPair{Xprv: xprv, Xpub: xpub})
+
+ pubHash := crypto.Ripemd160(xpub.PublicKey())
+
+ address, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+ if err != nil {
+ return NewErrorResponse(err)
+ }
+
+ return NewSuccessResponse(&keyPair{Xprv: xprv, Xpub: xpub, Address: address.EncodeAddress()})
}
}
func (a *API) startMining() Response {
- a.cpuMiner.Start()
+ //a.cpuMiner.Start()
+ a.miner.Start()
if !a.IsMining() {
return NewErrorResponse(errors.New("Failed to start mining"))
}
}
func (a *API) stopMining() Response {
- a.cpuMiner.Stop()
+ //a.cpuMiner.Stop()
+ a.miner.Stop()
if a.IsMining() {
return NewErrorResponse(errors.New("Failed to stop mining"))
}
info := &NetInfo{
Listening: a.sync.Switch().IsListening(),
Syncing: !a.sync.IsCaughtUp(),
- Mining: a.cpuMiner.IsMining(),
+ Mining: a.miner.IsMining(),
PeerCount: len(a.sync.Switch().Peers().List()),
CurrentBlock: a.chain.BestBlockHeight(),
NetWorkID: a.sync.NodeInfo().Network,
// IsMining return mining status
func (a *API) IsMining() bool {
- return a.cpuMiner.IsMining()
+ return a.miner.IsMining()
}
// return the peers of current node
"retire": txbuilder.DecodeRetireAction,
"spend_account": a.wallet.AccountMgr.DecodeSpendAction,
"spend_account_unspent_output": a.wallet.AccountMgr.DecodeSpendUTXOAction,
+ "dpos_address": a.wallet.AccountMgr.DecodeDposAction,
}
decoder, ok := decoders[action]
return decoder, ok
if !ok {
return false, errors.WithDetailf(ErrBadActionType, "no action type provided on action %d", i)
}
-
+ if strings.HasPrefix(actionType, "dpos_address") {
+ return false, nil
+ }
if strings.HasPrefix(actionType, "spend") || actionType == "issue" {
count++
}
// DataWitness used sign transaction
type DataWitness chainjson.HexBytes
-func (dw DataWitness) materialize(args *[][]byte) error {
+func (dw DataWitness) Materialize(args *[][]byte) error {
*args = append(*args, dw)
return nil
}
args = t.Arguments
case *types.ClaimInput:
args = t.Arguments
+ case *types.DposTx:
+ args = t.Arguments
}
// Note: These numbers will need to change if more args are added such that the minimum length changes
switch {
totalOutputBTM := uint64(0)
for _, input := range tx.Inputs {
- if input.InputType() != types.CoinbaseInputType && input.AssetID() == *consensus.BTMAssetID {
+ if input.InputType() != types.CoinbaseInputType && input.InputType() != types.DposInputType && input.AssetID() == *consensus.BTMAssetID {
totalInputBTM += input.Amount()
}
}
log "github.com/sirupsen/logrus"
+ "github.com/vapor/crypto/ed25519/chainkd"
chainjson "github.com/vapor/encoding/json"
)
return nil
}
-func (sw RawTxSigWitness) materialize(args *[][]byte) error {
+// Sign signs the template hash of input `index` with xprv and stores the
+// signature in the slot whose key matches xprv's public key. Slots that
+// already hold a signature are left untouched.
+func (sw *RawTxSigWitness) Sign(tpl *Template, index uint32, xprv chainkd.XPrv) error {
+	if len(sw.Sigs) < len(sw.Keys) {
+		// Each key in sw.Keys may produce a signature in sw.Sigs. Make
+		// sure there are enough slots in sw.Sigs and that we preserve any
+		// sigs already present.
+		newSigs := make([]chainjson.HexBytes, len(sw.Keys))
+		copy(newSigs, sw.Sigs)
+		sw.Sigs = newSigs
+	}
+	for i, keyID := range sw.Keys {
+		if len(sw.Sigs[i]) > 0 {
+			// Already have a signature for this key
+			continue
+		}
+		// Only the slot matching the provided key is signed.
+		if keyID.XPub.String() != xprv.XPub().String() {
+			continue
+		}
+		data := tpl.Hash(index).Byte32()
+		sigBytes := xprv.Sign(data[:])
+		// This break is ordered to avoid signing transaction successfully only once for a multiple-sign account
+		// that consist of different keys by the same password. Exit immediately when the signature is success,
+		// it means that only one signature will be successful in the loop for this multiple-sign account.
+		sw.Sigs[i] = sigBytes
+		break
+	}
+	return nil
+}
+
+func (sw RawTxSigWitness) Materialize(args *[][]byte) error {
var nsigs int
for i := 0; i < len(sw.Sigs) && nsigs < sw.Quorum; i++ {
if len(sw.Sigs[i]) > 0 {
return nil
}
-func (sw SignatureWitness) materialize(args *[][]byte) error {
+// Sign computes (or reuses) the witness signature program, hashes it, and
+// signs the hash with xprv when xprv's public key matches one of the
+// declared witness keys and that slot has no signature yet.
+func (sw *SignatureWitness) Sign(tpl *Template, index uint32, xprv chainkd.XPrv) error {
+	// Compute the predicate to sign. This is either a
+	// txsighash program if tpl.AllowAdditional is false (i.e., the tx is complete
+	// and no further changes are allowed) or a program enforcing
+	// constraints derived from the existing outputs and current input.
+	if len(sw.Program) == 0 {
+		var err error
+		sw.Program, err = buildSigProgram(tpl, tpl.SigningInstructions[index].Position)
+		if err != nil {
+			return err
+		}
+		if len(sw.Program) == 0 {
+			return ErrEmptyProgram
+		}
+	}
+	if len(sw.Sigs) < len(sw.Keys) {
+		// Each key in sw.Keys may produce a signature in sw.Sigs. Make
+		// sure there are enough slots in sw.Sigs and that we preserve any
+		// sigs already present.
+		newSigs := make([]chainjson.HexBytes, len(sw.Keys))
+		copy(newSigs, sw.Sigs)
+		sw.Sigs = newSigs
+	}
+	var h [32]byte
+	sha3pool.Sum256(h[:], sw.Program)
+	for i, keyID := range sw.Keys {
+		if len(sw.Sigs[i]) > 0 {
+			// Already have a signature for this key
+			continue
+		}
+		// The key is used as-is (no derivation path applied); the previous
+		// version built a `path` slice here that was never used — removed.
+		if keyID.XPub.String() != xprv.XPub().String() {
+			continue
+		}
+
+		sigBytes := xprv.Sign(h[:])
+
+		// This break is ordered to avoid signing transaction successfully only once for a multiple-sign account
+		// that consist of different keys by the same password. Exit immediately when the signature is success,
+		// it means that only one signature will be successful in the loop for this multiple-sign account.
+		sw.Sigs[i] = sigBytes
+		break
+	}
+	return nil
+}
+
+func (sw SignatureWitness) Materialize(args *[][]byte) error {
// This is the value of N for the CHECKPREDICATE call. The code
// assumes that everything already in the arg list before this call
// to Materialize is input to the signature program, so N is
si.WitnessComponents = append(si.WitnessComponents, sw)
}
+// AddWitnessKeysWithOutPath appends a SignatureWitness whose keys carry no
+// derivation path, for signing directly with root xpubs.
+func (si *SigningInstruction) AddWitnessKeysWithOutPath(xpubs []chainkd.XPub, quorum int) {
+	keyIDs := make([]keyID, 0, len(xpubs))
+	for _, pub := range xpubs {
+		keyIDs = append(keyIDs, keyID{XPub: pub})
+	}
+	si.WitnessComponents = append(si.WitnessComponents, &SignatureWitness{
+		Quorum: quorum,
+		Keys: keyIDs,
+	})
+}
+
// AddRawWitnessKeys adds a SignatureWitness with the given quorum and
// list of keys derived by applying the derivation path to each of the
// xpubs.
si.WitnessComponents = append(si.WitnessComponents, sw)
}
+// AddRawWitnessKeysWithoutPath appends a RawTxSigWitness whose keys carry
+// no derivation path, for raw-hash signing directly with root xpubs.
+func (si *SigningInstruction) AddRawWitnessKeysWithoutPath(xpubs []chainkd.XPub, quorum int) {
+	keyIDs := make([]keyID, 0, len(xpubs))
+	for _, pub := range xpubs {
+		keyIDs = append(keyIDs, keyID{XPub: pub})
+	}
+	si.WitnessComponents = append(si.WitnessComponents, &RawTxSigWitness{
+		Quorum: quorum,
+		Keys: keyIDs,
+	})
+}
+
// SigningInstruction gives directions for signing inputs in a TxTemplate.
type SigningInstruction struct {
Position uint32 `json:"position"`
// arguments for a VM program via its materialize method. Concrete
// witnessComponent types include SignatureWitness and dataWitness.
type witnessComponent interface {
- materialize(*[][]byte) error
+ Materialize(*[][]byte) error
}
// UnmarshalJSON unmarshal SigningInstruction
var witness [][]byte
for j, wc := range sigInst.WitnessComponents {
- err := wc.materialize(&witness)
+ err := wc.Materialize(&witness)
if err != nil {
return errors.WithDetailf(err, "error in witness component %d of input %d", j, i)
}
--- /dev/null
+package chain
+
+import (
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+)
+
+// Chain is the interface for Bytom core: block/header lookup, tx status,
+// main-chain membership, block processing and tx validation.
+type Chain interface {
+	BestBlockHeader() *types.BlockHeader
+	BestBlockHeight() uint64
+	CalcNextSeed(*bc.Hash) (*bc.Hash, error)
+	GetBlockByHash(*bc.Hash) (*types.Block, error)
+	GetBlockByHeight(uint64) (*types.Block, error)
+	GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error)
+	GetHeaderByHeight(uint64) (*types.BlockHeader, error)
+	GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error)
+	InMainChain(bc.Hash) bool
+	ProcessBlock(*types.Block) (bool, error)
+	ValidateTx(*types.Tx) (bool, error)
+	// NOTE(review): "GetAuthoritys" is a misspelling of "GetAuthorities";
+	// kept because implementers must match this exact name.
+	GetAuthoritys(string) string
+}
runNodeCmd.Flags().String("signer", config.Signer, "The signer corresponds to xpub of signblock")
runNodeCmd.Flags().String("side.sign_block_xpubs", config.Side.SignBlockXPubs, "Change federated peg to use a different xpub.")
+ runNodeCmd.Flags().String("consensus_config_file", config.ConsensusConfigFile, "consensus configuration file")
+
RootCmd.AddCommand(runNodeCmd)
}
--- /dev/null
+{
+ "consensus":{
+ "dpos": {
+ "period": 1,
+ "epoch": 300,
+ "maxSignersCount": 1,
+ "minVoterBalance": 0,
+ "genesisTimestamp": 1524549600,
+ "coinbase": "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep",
+ "xprv": "a8e281b615809046698fb0b0f2804a36d824d48fa443350f10f1b80649d39e5f1e85cf9855548915e36137345910606cbc8e7dd8497c831dce899ee6ac112445",
+ "signers": [
+ "vsm1qkm743xmgnvh84pmjchq2s4tnfpgu9ae2f9slep"
+ ]
+ }
+ }
+}
\ No newline at end of file
package config
import (
- "math/big"
"os"
"os/user"
"path/filepath"
Side *SideChainConfig `mapstructure:"side"`
MainChain *MainChainRpcConfig `mapstructure:"mainchain"`
Websocket *WebsocketConfig `mapstructure:"ws"`
+ Consensus *ConsensusConfig `mapstructure:"consensus"`
}
// Default configurable parameters.
Side: DefaultSideChainConfig(),
MainChain: DefaultMainChainRpc(),
Websocket: DefaultWebsocketConfig(),
+ Consensus: DefaultConsensusCOnfig(),
}
}
// log file name
LogFile string `mapstructure:"log_file"`
- //Validate pegin proof by checking bytom transaction inclusion in mainchain.
+ // Validate pegin proof by checking bytom transaction inclusion in mainchain.
ValidatePegin bool `mapstructure:"validate_pegin"`
Signer string `mapstructure:"signer"`
+
+ ConsensusConfigFile string `mapstructure:"consensus_config_file"`
}
// Default configurable base parameters.
MaxNumConcurrentReqs int `mapstructure:"max_num_concurrent_reqs"`
}
+// ConsensusConfig wraps consensus-engine specific configuration.
+type ConsensusConfig struct {
+	Dpos *DposConfig `mapstructure:"dpos"` // dpos engine parameters
+}
+
+// DposConfig holds the dpos consensus parameters loaded from the consensus
+// config file.
+type DposConfig struct {
+	Period uint64 `json:"period"` // Number of seconds between blocks to enforce
+	Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint
+	MaxSignerCount uint64 `json:"max_signers_count"` // Max count of signers
+	// NOTE(review): tag "min_boter_balance" looks like a typo of
+	// "min_voter_balance"; left unchanged here to avoid breaking existing
+	// config files — confirm before renaming.
+	MinVoterBalance uint64 `json:"min_boter_balance"` // Min voter balance to valid this vote
+	GenesisTimestamp uint64 `json:"genesis_timestamp"` // The LoopStartTime of first Block
+	Coinbase string `json:"coinbase"` // dpos coinbase address, used as default "from" for dpos actions
+	XPrv string `json:"xprv"` // root private key used for dpos signing
+	SelfVoteSigners []string `json:"signers"` // Signers vote by themselves to seal the block, make sure the signer accounts are pre-funded
+	Signers []common.Address // decoded form of SelfVoteSigners — presumably filled after loading; verify against the loader
+}
// Default configurable rpc's auth parameters.
}
}
+// DefaultDposConfig returns the built-in dpos parameters (1s block period,
+// 300-block epoch, single signer). Coinbase and XPrv are left zero and are
+// expected to come from the consensus config file.
+func DefaultDposConfig() *DposConfig {
+	return &DposConfig{
+		Period: 1,
+		Epoch: 300,
+		MaxSignerCount: 1,
+		MinVoterBalance: 0,
+		GenesisTimestamp: 1524549600,
+	}
+}
+
+// DefaultConsensusCOnfig returns a ConsensusConfig populated with the
+// default dpos parameters.
+// NOTE(review): "COnfig" is a typo of "Config"; kept because callers
+// (e.g. DefaultConfig) reference this exact name.
+func DefaultConsensusCOnfig() *ConsensusConfig {
+	return &ConsensusConfig{Dpos: DefaultDposConfig()}
+}
+
//-----------------------------------------------------------------------------
// Utils
package config
import (
+ "bytes"
"crypto/sha256"
"encoding/hex"
log "github.com/sirupsen/logrus"
"github.com/vapor/consensus"
- "github.com/vapor/crypto"
"github.com/vapor/crypto/ed25519"
"github.com/vapor/crypto/ed25519/chainkd"
"github.com/vapor/protocol/bc"
func commitToArguments() (res *[32]byte) {
var fedpegPubkeys []ed25519.PublicKey
- var signBlockPubkeys []ed25519.PublicKey
for _, xpub := range consensus.ActiveNetParams.FedpegXPubs {
fedpegPubkeys = append(fedpegPubkeys, xpub.PublicKey())
}
fedpegScript, _ := vmutil.P2SPMultiSigProgram(fedpegPubkeys, len(fedpegPubkeys))
- for _, xpub := range consensus.ActiveNetParams.SignBlockXPubs {
- signBlockPubkeys = append(signBlockPubkeys, xpub.PublicKey())
+ var buffer bytes.Buffer
+ for _, address := range CommonConfig.Consensus.Dpos.Signers {
+ redeemContract := address.ScriptAddress()
+ buffer.Write(redeemContract)
}
- signBlockScript, _ := vmutil.P2SPMultiSigProgram(signBlockPubkeys, len(signBlockPubkeys))
hasher := sha256.New()
hasher.Write(fedpegScript)
- hasher.Write(signBlockScript)
+ hasher.Write(buffer.Bytes())
resSlice := hasher.Sum(nil)
res = new([32]byte)
copy(res[:], resSlice)
types.NewTxOutput(*consensus.BTMAssetID, consensus.InitialBlockSubsidy, contract),
},
}
+
return types.NewTx(txData)
}
log.Panicf("fail on calc genesis tx merkel root")
}
+ var xPrv chainkd.XPrv
+ if CommonConfig.Consensus.Dpos.XPrv == "" {
+ log.Panicf("Signer is empty")
+ }
+ xPrv.UnmarshalText([]byte(CommonConfig.Consensus.Dpos.XPrv))
+ b, _ := xPrv.XPub().MarshalText()
+
block := &types.Block{
BlockHeader: types.BlockHeader{
Version: 1,
TransactionsMerkleRoot: merkleRoot,
TransactionStatusHash: txStatusHash,
},
+ Coinbase: b,
},
Transactions: []*types.Tx{tx},
}
-
- var xPrv chainkd.XPrv
- if consensus.ActiveNetParams.Signer == "" {
- return block
- }
- copy(xPrv[:], []byte(consensus.ActiveNetParams.Signer))
- msg, _ := block.MarshalText()
- sign := xPrv.Sign(msg)
- pubHash := crypto.Ripemd160(xPrv.XPub().PublicKey())
- control, err := vmutil.P2WPKHProgram([]byte(pubHash))
- if err != nil {
- log.Panicf(err.Error())
- }
- block.Proof.Sign = sign
- block.Proof.ControlProgram = control
return block
}
log.Panicf("fail on calc genesis tx merkel root")
}
+ var xPrv chainkd.XPrv
+ if CommonConfig.Consensus.Dpos.XPrv == "" {
+ log.Panicf("Signer is empty")
+ }
+ xPrv.UnmarshalText([]byte(CommonConfig.Consensus.Dpos.XPrv))
+ b, _ := xPrv.XPub().MarshalText()
+
block := &types.Block{
BlockHeader: types.BlockHeader{
Version: 1,
TransactionsMerkleRoot: merkleRoot,
TransactionStatusHash: txStatusHash,
},
+ Coinbase: b,
},
Transactions: []*types.Tx{tx},
}
log.Panicf("fail on calc genesis tx merkel root")
}
+ var xPrv chainkd.XPrv
+ if CommonConfig.Consensus.Dpos.XPrv == "" {
+ log.Panicf("Signer is empty")
+ }
+ xPrv.UnmarshalText([]byte(CommonConfig.Consensus.Dpos.XPrv))
+ b, _ := xPrv.XPub().MarshalText()
+
block := &types.Block{
BlockHeader: types.BlockHeader{
Version: 1,
TransactionsMerkleRoot: merkleRoot,
TransactionStatusHash: txStatusHash,
},
+ Coinbase: b,
},
Transactions: []*types.Tx{tx},
}
--- /dev/null
+package consensus
+
+import (
+ "github.com/vapor/chain"
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+)
+
+// Engine is an algorithm agnostic consensus engine.
+type Engine interface {
+ // Author retrieves the address of the account that minted the given
+ // block, which may be different from the header's coinbase if a consensus
+ // engine is based on signatures.
+ Author(header *types.BlockHeader, c chain.Chain) (string, error)
+
+ // VerifyHeader checks whether a header conforms to the consensus rules of a
+ // given engine. Verifying the seal may be done optionally here, or explicitly
+ // via the VerifySeal method.
+ VerifyHeader(c chain.Chain, header *types.BlockHeader, seal bool) error
+
+ // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
+ // concurrently. The method returns a quit channel to abort the operations and
+ // a results channel to retrieve the async verifications (the order is that of
+ // the input slice).
+ VerifyHeaders(c chain.Chain, headers []*types.BlockHeader, seals []bool) (chan<- struct{}, <-chan error)
+
+ // VerifySeal checks whether the crypto seal on a header is valid according to
+ // the consensus rules of the given engine.
+ VerifySeal(c chain.Chain, header *types.BlockHeader) error
+
+ // Prepare initializes the consensus fields of a block header according to the
+ // rules of a particular engine. The changes are executed inline.
+ Prepare(c chain.Chain, header *types.BlockHeader) error
+
+ // Finalize runs any post-transaction state modifications (e.g. block rewards)
+ // and assembles the final block.
+ // Note: The block header and state database might be updated to reflect any
+ // consensus rules that happen at finalization (e.g. block rewards).
+ Finalize(c chain.Chain, header *types.BlockHeader, txs []*bc.Tx) error
+
+ // Seal generates a new block for the given input block with the local miner's
+ // seal place on top.
+ Seal(c chain.Chain, block *types.Block) (*types.Block, error)
+}
--- /dev/null
+package dpos
+
+import (
+ "encoding/json"
+ "strconv"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/vapor/chain"
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+)
+
+const (
+ /*
+ * vapor:version:category:action/data
+ */
+ vaporPrefix = "vapor"
+ vaporVersion = "1"
+ vaporCategoryEvent = "event"
+ vaporCategoryLog = "oplog"
+ vaporCategorySC = "sc"
+ vaporEventVote = "vote"
+ vaporEventConfirm = "confirm"
+ vaporEventPorposal = "proposal"
+ vaporEventDeclare = "declare"
+
+ vaporMinSplitLen = 3
+ posPrefix = 0
+ posVersion = 1
+ posCategory = 2
+ posEventVote = 3
+ posEventConfirm = 3
+ posEventProposal = 3
+ posEventDeclare = 3
+ posEventConfirmNumber = 4
+
+ /*
+ * proposal type
+ */
+ proposalTypeCandidateAdd = 1
+ proposalTypeCandidateRemove = 2
+ proposalTypeMinerRewardDistributionModify = 3 // count in one thousand
+
+ /*
+ * proposal related
+ */
+ maxValidationLoopCnt = 123500 // About one month if seal each block per second & 21 super nodes
+ minValidationLoopCnt = 12350 // About three days if seal each block per second & 21 super nodes
+ defaultValidationLoopCnt = 30875 // About one week if seal each block per second & 21 super nodes
+)
+
+// Vote :
+// vote come from custom tx which data like "vapor:1:event:vote"
+// Sender of tx is Voter, the tx.to is Candidate
+// Stake is the balance of Voter when create this vote
+type Vote struct {
+ Voter string `json:"Voter"`
+ Candidate string `json:"Candidate"`
+ Stake uint64 `json:"Stake"`
+}
+
+// Confirmation :
+// confirmation come from custom tx which data like "vapor:1:event:confirm:123"
+// 123 is the block number be confirmed
+// Sender of tx is Signer only if the signer in the SignerQueue for block number 123
+type Confirmation struct {
+ Signer string `json:"signer"`
+ BlockNumber uint64 `json:"block_number"`
+}
+
+// Proposal :
+// proposal come from custom tx which data like "vapor:1:event:proposal:candidate:add:address" or "vapor:1:event:proposal:percentage:60"
+// proposal only come from the current candidates
+// not only candidate add/remove , current signer can proposal for params modify like percentage of reward distribution ...
+type Proposal struct {
+ Hash bc.Hash `json:"hash"` // tx hash
+ ValidationLoopCnt uint64 `json:"ValidationLoopCnt"` // validation block number length of this proposal from the received block number
+ ImplementNumber uint64 `json:"ImplementNumber"` // block number to implement modification in this proposal
+ ProposalType uint64 `json:"ProposalType"` // type of proposal 1 - add candidate 2 - remove candidate ...
+ Proposer string `json:"Proposer"` //
+ Candidate string `json:"Candidate"`
+ MinerRewardPerThousand uint64 `json:"MinerRewardPerThousand"`
+ Declares []*Declare `json:"Declares"` // Declare this proposal received
+ ReceivedNumber uint64 `json:"ReceivedNumber"` // block number of proposal received
+}
+
+// copy returns a duplicate of the proposal. The Declares slice gets a fresh
+// backing array, but its elements remain shared *Declare pointers, so this is
+// a shallow copy one level down.
+func (p *Proposal) copy() *Proposal {
+	dup := *p
+	dup.Declares = make([]*Declare, len(p.Declares))
+	copy(dup.Declares, p.Declares)
+	return &dup
+}
+
+// Declare :
+// declare come from custom tx which data like "vapor:1:event:declare:hash:yes"
+// proposal only come from the current candidates
+// hash is the hash of proposal tx
+type Declare struct {
+ ProposalHash bc.Hash `json:"ProposalHash"`
+ Declarer string `json:"Declarer"`
+ Decision bool `json:"Decision"`
+}
+
+// HeaderExtra is the struct of info in header.Extra[extraVanity:len(header.extra)-extraSeal]
+type HeaderExtra struct {
+ CurrentBlockConfirmations []Confirmation `json:"current_block_confirmations"`
+ CurrentBlockVotes []Vote `json:"CurrentBlockVotes"`
+ CurrentBlockProposals []Proposal `json:"CurrentBlockProposals"`
+ CurrentBlockDeclares []Declare `json:"CurrentBlockDeclares"`
+ ModifyPredecessorVotes []Vote `json:"ModifyPredecessorVotes"`
+ LoopStartTime uint64 `json:"LoopStartTime"`
+ SignerQueue []string `json:"SignerQueue"`
+ SignerMissing []string `json:"SignerMissing"`
+ ConfirmedBlockNumber uint64 `json:"ConfirmedBlockNumber"`
+}
+
+// processCustomTx extracts dpos events (vote / confirm / proposal / declare)
+// carried in the "vapor:<version>:<category>:<action>..." payload of Dpos tx
+// entries and accumulates them into the block's HeaderExtra.
+func (d *Dpos) processCustomTx(headerExtra HeaderExtra, c chain.Chain, header *types.BlockHeader, txs []*bc.Tx) (HeaderExtra, error) {
+	var (
+		snap *Snapshot
+		err  error
+	)
+	height := header.Height
+	if height > 1 {
+		// Snapshot of the parent block, used below to validate candidates.
+		snap, err = d.snapshot(c, height-1, header.PreviousBlockHash, nil, nil, defaultLoopCntRecalculateSigners)
+		if err != nil {
+			return headerExtra, err
+		}
+	}
+
+	for _, tx := range txs {
+		var (
+			from string
+			to   string
+		)
+		dpos := new(bc.Dpos)
+		stake := uint64(0)
+		for _, value := range tx.Entries {
+			// Renamed from "d" to avoid shadowing the method receiver.
+			switch e := value.(type) {
+			case *bc.Dpos:
+				from = e.From
+				to = e.To
+				dpos = e
+				stake = e.Stake
+			default:
+				continue
+			}
+
+			if len(dpos.Data) >= len(vaporPrefix) {
+				txDataInfo := strings.Split(dpos.Data, ":")
+				if len(txDataInfo) >= vaporMinSplitLen && txDataInfo[posPrefix] == vaporPrefix && txDataInfo[posVersion] == vaporVersion {
+					switch txDataInfo[posCategory] {
+					case vaporCategoryEvent:
+						if len(txDataInfo) > vaporMinSplitLen {
+							// BUG FIX: snap is nil for height <= 1, so every
+							// snap dereference must be guarded or block 1
+							// containing an event payload panics here.
+							if txDataInfo[posEventVote] == vaporEventVote && (!candidateNeedPD || (snap != nil && snap.isCandidate(to))) {
+								headerExtra.CurrentBlockVotes = d.processEventVote(headerExtra.CurrentBlockVotes, stake, from, to)
+							} else if txDataInfo[posEventConfirm] == vaporEventConfirm {
+								headerExtra.CurrentBlockConfirmations = d.processEventConfirm(headerExtra.CurrentBlockConfirmations, c, txDataInfo, height, tx, from)
+							} else if txDataInfo[posEventProposal] == vaporEventPorposal && snap != nil && snap.isCandidate(from) {
+								headerExtra.CurrentBlockProposals = d.processEventProposal(headerExtra.CurrentBlockProposals, txDataInfo, tx, from)
+							} else if txDataInfo[posEventDeclare] == vaporEventDeclare && snap != nil && snap.isCandidate(from) {
+								headerExtra.CurrentBlockDeclares = d.processEventDeclare(headerExtra.CurrentBlockDeclares, txDataInfo, tx, from)
+							}
+						} else {
+							// todo : something wrong, leave this transaction to process as normal transaction
+						}
+					case vaporCategoryLog:
+						// todo :
+					case vaporCategorySC:
+						// todo :
+					}
+				}
+			}
+			/*
+				if height > 1 {
+					headerExtra.ModifyPredecessorVotes = d.processPredecessorVoter(headerExtra.ModifyPredecessorVotes, stake, from, to, snap)
+				}
+			*/
+		}
+	}
+
+	return headerExtra, nil
+}
+
+// processEventProposal parses a proposal event into a Proposal and appends it
+// to currentBlockProposals. The tokens after "proposal" form a flat key:value
+// list; any out-of-range or unparsable value silently drops the whole event.
+func (d *Dpos) processEventProposal(currentBlockProposals []Proposal, txDataInfo []string, tx *bc.Tx, proposer string) []Proposal {
+	// Start from defaults; recognized keys below override them.
+	proposal := Proposal{
+		Hash:                   tx.ID,
+		ValidationLoopCnt:      defaultValidationLoopCnt,
+		ImplementNumber:        uint64(1),
+		ProposalType:           proposalTypeCandidateAdd,
+		Proposer:               proposer,
+		MinerRewardPerThousand: minerRewardPerThousand,
+		Declares:               []*Declare{},
+		ReceivedNumber:         uint64(0),
+	}
+
+	// Walk the trailing tokens two at a time as key/value pairs; an odd
+	// trailing token is ignored.
+	for i := 0; i < len(txDataInfo[posEventProposal+1:])/2; i++ {
+		k, v := txDataInfo[posEventProposal+1+i*2], txDataInfo[posEventProposal+2+i*2]
+		switch k {
+		case "vlcnt":
+			// If vlcnt is missing then user default value, but if the vlcnt is beyond the min/max value then ignore this proposal
+			if validationLoopCnt, err := strconv.Atoi(v); err != nil || validationLoopCnt < minValidationLoopCnt || validationLoopCnt > maxValidationLoopCnt {
+				return currentBlockProposals
+			} else {
+				proposal.ValidationLoopCnt = uint64(validationLoopCnt)
+			}
+		case "implement_number":
+			if implementNumber, err := strconv.Atoi(v); err != nil || implementNumber <= 0 {
+				return currentBlockProposals
+			} else {
+				proposal.ImplementNumber = uint64(implementNumber)
+			}
+		case "proposal_type":
+			if proposalType, err := strconv.Atoi(v); err != nil || (proposalType != proposalTypeCandidateAdd && proposalType != proposalTypeCandidateRemove && proposalType != proposalTypeMinerRewardDistributionModify) {
+				return currentBlockProposals
+			} else {
+				proposal.ProposalType = uint64(proposalType)
+			}
+		case "candidate":
+			// not check here
+			//proposal.Candidate.UnmarshalText([]byte(v))
+			/*
+				address, err := common.DecodeAddress(v, &consensus.ActiveNetParams)
+				if err != nil {
+					return currentBlockProposals
+				}
+			*/
+			proposal.Candidate = v
+		case "mrpt":
+			// miner reward per thousand
+			if mrpt, err := strconv.Atoi(v); err != nil || mrpt < 0 || mrpt > 1000 {
+				return currentBlockProposals
+			} else {
+				proposal.MinerRewardPerThousand = uint64(mrpt)
+			}
+
+		}
+	}
+
+	return append(currentBlockProposals, proposal)
+}
+
+// processEventDeclare parses a declare event ("...:declare:hash:<h>:decision:yes|no")
+// into a Declare and appends it; a decision other than yes/no drops the event.
+func (d *Dpos) processEventDeclare(currentBlockDeclares []Declare, txDataInfo []string, tx *bc.Tx, declarer string) []Declare {
+	declare := Declare{
+		ProposalHash: bc.Hash{},
+		Declarer:     declarer,
+		Decision:     true,
+	}
+
+	// Consume the trailing tokens pairwise as key/value; a dangling key is ignored.
+	kvs := txDataInfo[posEventDeclare+1:]
+	for i := 0; i+1 < len(kvs); i += 2 {
+		key, value := kvs[i], kvs[i+1]
+		switch key {
+		case "hash":
+			declare.ProposalHash.UnmarshalText([]byte(value))
+		case "decision":
+			switch value {
+			case "yes":
+				declare.Decision = true
+			case "no":
+				declare.Decision = false
+			default:
+				return currentBlockDeclares
+			}
+		}
+	}
+
+	return append(currentBlockDeclares, declare)
+}
+
+// processEventVote records a vote event as (voter -> candidate, stake).
+// The MinVoterBalance threshold check is intentionally disabled for now.
+func (d *Dpos) processEventVote(currentBlockVotes []Vote, stake uint64, voter, to string) []Vote {
+	vote := Vote{
+		Voter:     voter,
+		Candidate: to,
+		Stake:     stake,
+	}
+	return append(currentBlockVotes, vote)
+}
+
+// processEventConfirm handles "vapor:1:event:confirm:<height>" events. The
+// confirmation is accepted only when the confirmed height lies within the
+// last MaxSignerCount blocks and the confirmer was in that block's signer queue.
+func (d *Dpos) processEventConfirm(currentBlockConfirmations []Confirmation, c chain.Chain, txDataInfo []string, number uint64, tx *bc.Tx, confirmer string) []Confirmation {
+	if len(txDataInfo) <= posEventConfirmNumber {
+		return currentBlockConfirmations
+	}
+	confirmedBlockNumber, err := strconv.Atoi(txDataInfo[posEventConfirmNumber])
+	// BUG FIX: the previous check "number-uint64(n) < 0" is always false for
+	// uint64, and the subtraction underflows when n > number. Compare before
+	// subtracting, and reject negative parsed heights explicitly.
+	if err != nil || confirmedBlockNumber < 0 || uint64(confirmedBlockNumber) > number || number-uint64(confirmedBlockNumber) > d.config.MaxSignerCount {
+		return currentBlockConfirmations
+	}
+	// BUG FIX: the lookup error was previously discarded; treat it like a
+	// missing header.
+	confirmedHeader, err := c.GetBlockByHeight(uint64(confirmedBlockNumber))
+	if err != nil || confirmedHeader == nil {
+		log.Info("Fail to get confirmedHeader")
+		return currentBlockConfirmations
+	}
+	// Extra must be large enough to hold the vanity prefix and seal suffix.
+	if extraVanity+extraSeal > len(confirmedHeader.Extra) {
+		return currentBlockConfirmations
+	}
+	confirmedHeaderExtra := HeaderExtra{}
+	//err = rlp.DecodeBytes(confirmedHeader.Extra[extraVanity:len(confirmedHeader.Extra)-extraSeal], &confirmedHeaderExtra)
+	if err := json.Unmarshal(confirmedHeader.Extra[extraVanity:len(confirmedHeader.Extra)-extraSeal], &confirmedHeaderExtra); err != nil {
+		log.Info("Fail to decode parent header", "err", err)
+		return currentBlockConfirmations
+	}
+	// Only a signer scheduled for the confirmed block may confirm it.
+	for _, s := range confirmedHeaderExtra.SignerQueue {
+		if s == confirmer {
+			currentBlockConfirmations = append(currentBlockConfirmations, Confirmation{
+				Signer:      confirmer,
+				BlockNumber: uint64(confirmedBlockNumber),
+			})
+			break
+		}
+	}
+
+	return currentBlockConfirmations
+}
+
+// processPredecessorVoter re-records the stake of any existing voter touched
+// by this transfer (first the sender, then the receiver) so that their vote
+// weight can be recalculated. Zero-stake transfers are ignored.
+func (d *Dpos) processPredecessorVoter(modifyPredecessorVotes []Vote, stake uint64, voter, to string, snap *Snapshot) []Vote {
+	if stake == 0 {
+		return modifyPredecessorVotes
+	}
+	for _, addr := range []string{voter, to} {
+		if snap.isVoter(addr) {
+			modifyPredecessorVotes = append(modifyPredecessorVotes, Vote{
+				Voter:     addr,
+				Candidate: "",
+				Stake:     stake,
+			})
+		}
+	}
+	return modifyPredecessorVotes
+}
--- /dev/null
+package dpos
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "math/big"
+ "sync"
+ "time"
+
+ lru "github.com/hashicorp/golang-lru"
+ log "github.com/sirupsen/logrus"
+ "github.com/vapor/chain"
+ "github.com/vapor/common"
+ "github.com/vapor/config"
+ "github.com/vapor/consensus"
+ "github.com/vapor/crypto"
+ "github.com/vapor/crypto/ed25519/chainkd"
+ "github.com/vapor/protocol"
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+ "github.com/vapor/protocol/vm/vmutil"
+)
+
+const (
+ inMemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
+ inMemorySignatures = 4096 // Number of recent block signatures to keep in memory
+ secondsPerYear = 365 * 24 * 3600 // Number of seconds for one year
+ checkpointInterval = 360 // About N hours if config.period is N
+ module = "dpos"
+)
+
+//delegated-proof-of-stake protocol constants.
+var (
+ SignerBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block first year
+ defaultEpochLength = uint64(3000000) // Default number of blocks after which vote's period of validity
+ defaultBlockPeriod = uint64(3) // Default minimum difference between two consecutive block's timestamps
+ defaultMaxSignerCount = uint64(21) //
+ //defaultMinVoterBalance = new(big.Int).Mul(big.NewInt(10000), big.NewInt(1e+18))
+ defaultMinVoterBalance = uint64(0)
+ extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
+ extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
+ defaultDifficulty = big.NewInt(1) // Default difficulty
+ defaultLoopCntRecalculateSigners = uint64(10) // Default loop count to recreate signers from top tally
+ minerRewardPerThousand = uint64(618) // Default reward for miner in each block from block reward (618/1000)
+ candidateNeedPD = false // is new candidate need Proposal & Declare process
+)
+
+var (
+ // errUnknownBlock is returned when the list of signers is requested for a block
+ // that is not part of the local blockchain.
+ errUnknownBlock = errors.New("unknown block")
+
+ // errMissingVanity is returned if a block's extra-data section is shorter than
+ // 32 bytes, which is required to store the signer vanity.
+ errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
+
+ // errMissingSignature is returned if a block's extra-data section doesn't seem
+ // to contain a 65 byte secp256k1 signature.
+ errMissingSignature = errors.New("extra-data 65 byte suffix signature missing")
+
+ // errInvalidMixDigest is returned if a block's mix digest is non-zero.
+ errInvalidMixDigest = errors.New("non-zero mix digest")
+
+ // errInvalidUncleHash is returned if a block contains an non-empty uncle list.
+ errInvalidUncleHash = errors.New("non empty uncle hash")
+
+ // ErrInvalidTimestamp is returned if the timestamp of a block is lower than
+ // the previous block's timestamp + the minimum block period.
+ ErrInvalidTimestamp = errors.New("invalid timestamp")
+
+ // errInvalidVotingChain is returned if an authorization list is attempted to
+ // be modified via out-of-range or non-contiguous headers.
+ errInvalidVotingChain = errors.New("invalid voting chain")
+
+ // errUnauthorized is returned if a header is signed by a non-authorized entity.
+ errUnauthorized = errors.New("unauthorized")
+
+ // errPunishedMissing is returned if a header calculate punished signer is wrong.
+ errPunishedMissing = errors.New("punished signer missing")
+
+ // errWaitTransactions is returned if an empty block is attempted to be sealed
+ // on an instant chain (0 second period). It's important to refuse these as the
+ // block reward is zero, so an empty block just bloats the chain... fast.
+ errWaitTransactions = errors.New("waiting for transactions")
+
+ // errUnclesNotAllowed is returned if uncles exists
+ errUnclesNotAllowed = errors.New("uncles not allowed")
+
+ // errCreateSignerQueueNotAllowed is returned if called in (block number + 1) % maxSignerCount != 0
+ errCreateSignerQueueNotAllowed = errors.New("create signer queue not allowed")
+
+ // errInvalidSignerQueue is returned if verify SignerQueue fail
+ errInvalidSignerQueue = errors.New("invalid signer queue")
+
+ // errSignerQueueEmpty is returned if no signer when calculate
+ errSignerQueueEmpty = errors.New("signer queue is empty")
+)
+
+// Dpos implements the delegated-proof-of-stake consensus engine.
+type Dpos struct {
+	config     *config.DposConfig // Consensus engine configuration parameters
+	store      protocol.Store     // Database to store and retrieve snapshot checkpoints
+	recents    *lru.ARCCache      // Snapshots for recent block to speed up reorgs
+	signatures *lru.ARCCache      // Signatures of recent blocks to speed up mining
+	signer     string             // Address of the signing key
+	signFn     SignerFn           // Signer function to authorize hashes with
+	signTxFn   SignTxFn           // Sign transaction function to sign tx
+	lock       sync.RWMutex       // Protects the signer fields
+	lcsc       uint64             // Last confirmed side chain
+}
+
+// SignerFn is a signer callback function to request a hash to be signed by a backing account.
+type SignerFn func(string, []byte) ([]byte, error)
+
+// SignTxFn is a signTx
+type SignTxFn func(string, *bc.Tx, *big.Int) (*bc.Tx, error)
+
+// ecrecover derives the signer's P2WPKH address from the xpub stored in the
+// block header's Coinbase field. The sigcache and chain parameters are kept
+// for signature compatibility with callers.
+func ecrecover(header *types.BlockHeader, sigcache *lru.ARCCache, c chain.Chain) (string, error) {
+	xpub := &chainkd.XPub{}
+	// BUG FIX: this error used to be silently ignored, which would derive an
+	// address from an all-zero key on malformed coinbase data.
+	if err := xpub.UnmarshalText(header.Coinbase); err != nil {
+		return "", err
+	}
+	pubHash := crypto.Ripemd160(xpub.PublicKey())
+	address, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+	if err != nil {
+		return "", err
+	}
+
+	return address.EncodeAddress(), nil
+}
+
+// New creates a Dpos consensus engine from the supplied config, substituting
+// defaults for any zero-valued parameter, and allocates the in-memory
+// snapshot and signature caches.
+func New(config *config.DposConfig, store protocol.Store) *Dpos {
+	conf := *config
+	if conf.Epoch == 0 {
+		conf.Epoch = defaultEpochLength
+	}
+	if conf.Period == 0 {
+		conf.Period = defaultBlockPeriod
+	}
+	if conf.MaxSignerCount == 0 {
+		conf.MaxSignerCount = defaultMaxSignerCount
+	}
+	if conf.MinVoterBalance == 0 {
+		// NOTE(review): defaultMinVoterBalance is currently 0, so this branch
+		// is a no-op; kept in case the default becomes non-zero.
+		conf.MinVoterBalance = defaultMinVoterBalance
+	}
+	// Allocate the snapshot caches and create the engine
+	recents, _ := lru.NewARC(inMemorySnapshots)
+	signatures, _ := lru.NewARC(inMemorySignatures)
+	return &Dpos{
+		config:     &conf,
+		store:      store,
+		recents:    recents,
+		signatures: signatures,
+	}
+}
+
+// Authorize injects a private key into the consensus engine to mint new blocks with.
+func (d *Dpos) Authorize(signer string /*, signFn SignerFn*/) {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ d.signer = signer
+ //d.signFn = signFn
+}
+
+// Author retrieves the address of the account that sealed the given block
+// header (derived from the xpub in the Coinbase field).
+func (d *Dpos) Author(header *types.BlockHeader, c chain.Chain) (string, error) {
+	return ecrecover(header, d.signatures, c)
+}
+
+// VerifyHeader checks whether a header conforms to the consensus rules by
+// delegating to verifyCascadingFields (which also verifies the seal).
+// NOTE(review): the seal flag is currently ignored — confirm intent.
+func (d *Dpos) VerifyHeader(c chain.Chain, header *types.BlockHeader, seal bool) error {
+	return d.verifyCascadingFields(c, header, nil)
+}
+
+// VerifyHeaders is not implemented: it returns nil channels, so callers must
+// not receive from the results. NOTE(review): stub — confirm callers cope.
+func (d *Dpos) VerifyHeaders(c chain.Chain, headers []*types.BlockHeader, seals []bool) (chan<- struct{}, <-chan error) {
+	return nil, nil
+}
+
+// VerifySeal is a no-op here; seal verification happens in verifySeal via
+// VerifyHeader/verifyCascadingFields.
+func (d *Dpos) VerifySeal(c chain.Chain, header *types.BlockHeader) error {
+	return nil
+}
+
+// verifyHeader is currently a no-op placeholder.
+func (d *Dpos) verifyHeader(c chain.Chain, header *types.BlockHeader, parents []*types.BlockHeader) error {
+	return nil
+}
+
+// verifyCascadingFields verifies all header fields that depend on the parent
+// chain: it resolves the parent (from the supplied batch or the store),
+// materializes the voting snapshot up to the parent, then checks the seal.
+func (d *Dpos) verifyCascadingFields(c chain.Chain, header *types.BlockHeader, parents []*types.BlockHeader) error {
+	// The genesis block is the always valid dead-end
+	height := header.Height
+	if height == 0 {
+		return nil
+	}
+
+	var (
+		parent *types.BlockHeader
+		err    error
+	)
+
+	// Prefer the in-memory parent batch (batch verification); otherwise fall
+	// back to the chain store.
+	if len(parents) > 0 {
+		parent = parents[len(parents)-1]
+	} else {
+		parent, err = c.GetHeaderByHeight(height - 1)
+		if err != nil {
+			return err
+		}
+	}
+
+	if parent == nil {
+		return errors.New("unknown ancestor")
+	}
+
+	// Building the snapshot validates the voting chain up to the parent.
+	if _, err = d.snapshot(c, height-1, header.PreviousBlockHash, parents, nil, defaultLoopCntRecalculateSigners); err != nil {
+		return err
+	}
+
+	return d.verifySeal(c, header, parents)
+}
+
+// verifySeal checks whether the signature contained in the header satisfies the
+// consensus protocol requirements. The method accepts an optional list of parent
+// headers that aren't yet part of the local blockchain to generate the snapshots
+// from.
+func (d *Dpos) verifySeal(c chain.Chain, header *types.BlockHeader, parents []*types.BlockHeader) error {
+	height := header.Height
+	if height == 0 {
+		return errUnknownBlock
+	}
+	// Retrieve the snapshot needed to verify this header and cache it
+	snap, err := d.snapshot(c, height-1, header.PreviousBlockHash, parents, nil, defaultLoopCntRecalculateSigners)
+	if err != nil {
+		return err
+	}
+
+	// Resolve the authorization key and check against signers
+	signer, err := ecrecover(header, d.signatures, c)
+	if err != nil {
+		return err
+	}
+
+	if height > d.config.MaxSignerCount {
+		var (
+			parent *types.BlockHeader
+			err    error
+		)
+		if len(parents) > 0 {
+			parent = parents[len(parents)-1]
+		} else {
+			if parent, err = c.GetHeaderByHeight(height - 1); err != nil {
+				return err
+			}
+		}
+
+		// Derive the parent block's coinbase address from its stored xpub.
+		xpub := &chainkd.XPub{}
+		xpub.UnmarshalText(parent.Coinbase)
+		derivedPK := xpub.PublicKey()
+		pubHash := crypto.Ripemd160(derivedPK)
+		parentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+		if err != nil {
+			return err
+		}
+
+		// Derive the current block's coinbase address the same way.
+		xpub.UnmarshalText(header.Coinbase)
+		derivedPK = xpub.PublicKey()
+		pubHash = crypto.Ripemd160(derivedPK)
+		currentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+		if err != nil {
+			return err
+		}
+
+		parentHeaderExtra := HeaderExtra{}
+		if err = json.Unmarshal(parent.Extra[extraVanity:len(parent.Extra)-extraSeal], &parentHeaderExtra); err != nil {
+			return err
+		}
+
+		currentHeaderExtra := HeaderExtra{}
+		// BUG FIX: "&currentHeaderExtra" had been mangled into the HTML-entity
+		// residue "¤tHeaderExtra", which does not compile.
+		if err = json.Unmarshal(header.Extra[extraVanity:len(header.Extra)-extraSeal], &currentHeaderExtra); err != nil {
+			return err
+		}
+
+		// verify signerqueue
+		if height%d.config.MaxSignerCount == 0 {
+			// Queue boundary: the queue must have been rebuilt correctly.
+			if err := snap.verifySignerQueue(currentHeaderExtra.SignerQueue); err != nil {
+				return err
+			}
+		} else {
+			// Mid-loop: the queue must be carried over unchanged from parent.
+			for i := 0; i < int(d.config.MaxSignerCount); i++ {
+				if parentHeaderExtra.SignerQueue[i] != currentHeaderExtra.SignerQueue[i] {
+					return errInvalidSignerQueue
+				}
+			}
+		}
+		// verify missing signer for punish
+		parentSignerMissing := getSignerMissing(parentCoinbase.EncodeAddress(), currentCoinbase.EncodeAddress(), parentHeaderExtra)
+		if len(parentSignerMissing) != len(currentHeaderExtra.SignerMissing) {
+			return errPunishedMissing
+		}
+		for i, signerMissing := range currentHeaderExtra.SignerMissing {
+			if parentSignerMissing[i] != signerMissing {
+				return errPunishedMissing
+			}
+		}
+	}
+
+	if !snap.inturn(signer, header.Timestamp) {
+		return errUnauthorized
+	}
+
+	return nil
+}
+
+// Prepare implements consensus.Engine, preparing all the consensus fields of the header for running the transactions on top.
+// For block 1 it waits, in Period-sized steps, until two seconds before the
+// configured genesis timestamp so the chain does not start sealing early.
+func (d *Dpos) Prepare(c chain.Chain, header *types.BlockHeader) error {
+	if d.config.GenesisTimestamp < uint64(time.Now().Unix()) {
+		return nil
+	}
+
+	if header.Height == 1 {
+		startAt := time.Unix(int64(d.config.GenesisTimestamp-2), 0)
+		for {
+			delay := time.Until(startAt)
+			if delay <= 0 {
+				log.WithFields(log.Fields{"module": module, "time": time.Now()}).Info("Ready for seal block")
+				break
+			}
+			// Sleep at most one Period per iteration so the wait stays responsive.
+			if step := time.Duration(d.config.Period) * time.Second; delay > step {
+				delay = step
+			}
+			log.WithFields(log.Fields{"module": module, "delay": time.Until(startAt)}).Info("Waiting for seal block")
+			// A single-case select around time.After is just a sleep.
+			time.Sleep(delay)
+		}
+	}
+	return nil
+}
+
+// Finalize runs post-transaction state modifications: it stamps the header
+// timestamp, rebuilds the HeaderExtra (votes, confirmations, signer queue)
+// from this block's transactions, and re-encodes it into header.Extra between
+// the vanity prefix and the seal suffix.
+func (d *Dpos) Finalize(c chain.Chain, header *types.BlockHeader, txs []*bc.Tx) error {
+	height := header.Height
+	parent, err := c.GetHeaderByHeight(height - 1)
+	if parent == nil {
+		// NOTE(review): if the store yields (nil, nil) this returns a nil
+		// error even though finalization did not run — confirm the contract
+		// of GetHeaderByHeight and the callers' handling.
+		return err
+	}
+	// Parent coinbase address, derived from the xpub stored in the header.
+	// NOTE(review): UnmarshalText errors are ignored here — confirm Coinbase
+	// is always a valid marshaled xpub by this point.
+	var xpub chainkd.XPub
+	xpub.UnmarshalText(parent.Coinbase)
+	pubHash := crypto.Ripemd160(xpub.PublicKey())
+	parentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+	if err != nil {
+		return err
+	}
+
+	// Current block's coinbase address, derived the same way.
+	xpub.UnmarshalText(header.Coinbase)
+	pubHash = crypto.Ripemd160(xpub.PublicKey())
+	currentCoinbase, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+	if err != nil {
+		return err
+	}
+
+	// Timestamp = parent.Timestamp + Period, clamped to "now" at the earliest.
+	t := new(big.Int).Add(new(big.Int).SetUint64(parent.Timestamp), new(big.Int).SetUint64(d.config.Period))
+	header.Timestamp = t.Uint64()
+
+	if header.Timestamp < uint64(time.Now().Unix()) {
+		header.Timestamp = uint64(time.Now().Unix())
+	}
+
+	// Ensure Extra holds exactly the fixed-size vanity prefix before the
+	// HeaderExtra payload is appended below.
+	if len(header.Extra) < extraVanity {
+		header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
+	}
+
+	header.Extra = header.Extra[:extraVanity]
+	// genesisVotes write direct into snapshot, which number is 1
+	var genesisVotes []*Vote
+	parentHeaderExtra := HeaderExtra{}
+	currentHeaderExtra := HeaderExtra{}
+	if height == 1 {
+		// Block 1: every configured self-vote signer votes for itself once.
+		alreadyVote := make(map[string]struct{})
+		for _, voter := range d.config.SelfVoteSigners {
+			if _, ok := alreadyVote[voter]; !ok {
+				genesisVotes = append(genesisVotes, &Vote{
+					Voter:     voter,
+					Candidate: voter,
+					Stake:     0,
+					//Stake: state.GetBalance(voter),
+				})
+				alreadyVote[voter] = struct{}{}
+			}
+		}
+	} else {
+		// Carry forward confirmed number, signer queue and loop start time
+		// from the parent's HeaderExtra.
+		parentHeaderExtraByte := parent.Extra[extraVanity : len(parent.Extra)-extraSeal]
+		if err := json.Unmarshal(parentHeaderExtraByte, &parentHeaderExtra); err != nil {
+			return err
+		}
+		currentHeaderExtra.ConfirmedBlockNumber = parentHeaderExtra.ConfirmedBlockNumber
+		currentHeaderExtra.SignerQueue = parentHeaderExtra.SignerQueue
+		currentHeaderExtra.LoopStartTime = parentHeaderExtra.LoopStartTime
+		currentHeaderExtra.SignerMissing = getSignerMissing(parentCoinbase.EncodeAddress(), currentCoinbase.EncodeAddress(), parentHeaderExtra)
+	}
+
+	// calculate votes write into header.extra
+	currentHeaderExtra, err = d.processCustomTx(currentHeaderExtra, c, header, txs)
+	if err != nil {
+		return err
+	}
+	// Assemble the voting snapshot to check which votes make sense
+	snap, err := d.snapshot(c, height-1, header.PreviousBlockHash, nil, genesisVotes, defaultLoopCntRecalculateSigners)
+	if err != nil {
+		return err
+	}
+
+	currentHeaderExtra.ConfirmedBlockNumber = snap.getLastConfirmedBlockNumber(currentHeaderExtra.CurrentBlockConfirmations).Uint64()
+	// write signerQueue in first header, from self vote signers in genesis block
+	if height == 1 {
+		currentHeaderExtra.LoopStartTime = d.config.GenesisTimestamp
+		for i := 0; i < int(d.config.MaxSignerCount); i++ {
+			currentHeaderExtra.SignerQueue = append(currentHeaderExtra.SignerQueue, d.config.SelfVoteSigners[i%len(d.config.SelfVoteSigners)])
+		}
+	}
+	if height%d.config.MaxSignerCount == 0 {
+		//currentHeaderExtra.LoopStartTime = header.Time.Uint64()
+		currentHeaderExtra.LoopStartTime = currentHeaderExtra.LoopStartTime + d.config.Period*d.config.MaxSignerCount
+		// create random signersQueue in currentHeaderExtra by snapshot.Tally
+		currentHeaderExtra.SignerQueue = []string{}
+		newSignerQueue, err := snap.createSignerQueue()
+		if err != nil {
+			return err
+		}
+
+		currentHeaderExtra.SignerQueue = newSignerQueue
+
+	}
+	// encode header.extra
+	currentHeaderExtraEnc, err := json.Marshal(currentHeaderExtra)
+	if err != nil {
+		return err
+	}
+	header.Extra = append(header.Extra, currentHeaderExtraEnc...)
+	header.Extra = append(header.Extra, make([]byte, extraSeal)...)
+	return nil
+}
+
+// Seal signs the given block with the locally configured XPrv and attaches
+// the resulting proof; it refuses when this node's signer is not in turn.
+func (d *Dpos) Seal(c chain.Chain, block *types.Block) (*types.Block, error) {
+	header := block.BlockHeader
+	height := header.Height
+	if height == 0 {
+		return nil, errUnknownBlock
+	}
+
+	// Sealing an empty block on a 0-period chain would just bloat the chain.
+	if d.config.Period == 0 && len(block.Transactions) == 0 {
+		return nil, errWaitTransactions
+	}
+	// Bail out if we're unauthorized to sign a block
+	snap, err := d.snapshot(c, height-1, header.PreviousBlockHash, nil, nil, defaultLoopCntRecalculateSigners)
+	if err != nil {
+		return nil, err
+	}
+	if !snap.inturn(d.signer, header.Timestamp) {
+		return nil, errUnauthorized
+	}
+
+	var xPrv chainkd.XPrv
+	if config.CommonConfig.Consensus.Dpos.XPrv == "" {
+		return nil, errors.New("Signer is empty")
+	}
+	// NOTE(review): UnmarshalText's error is ignored; a malformed XPrv would
+	// silently sign with a zero key — confirm and consider checking it.
+	xPrv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv))
+	// The proof signs only the transactions merkle root of the block.
+	sign := xPrv.Sign(block.BlockCommitment.TransactionsMerkleRoot.Bytes())
+	pubHash := crypto.Ripemd160(xPrv.XPub().PublicKey())
+
+	// Control program committing to the signer's public-key hash (P2WPKH).
+	control, err := vmutil.P2WPKHProgram([]byte(pubHash))
+	if err != nil {
+		return nil, err
+	}
+
+	block.Proof = types.Proof{Sign: sign, ControlProgram: control}
+	return block, nil
+}
+
// IsSealer reports whether this node's configured signer is scheduled to
// seal the slot at headerTime, based on the voting snapshot resolved for
// the given (hash, header) position. The snapshot is looked up in the
// in-memory cache, then on-disk checkpoints, falling back to building the
// genesis snapshot from the configured self-vote signers.
func (d *Dpos) IsSealer(c chain.Chain, hash bc.Hash, header *types.BlockHeader, headerTime uint64) (bool, error) {
	var (
		snap    *Snapshot
		headers []*types.BlockHeader
	)
	h := hash
	height := header.Height
	// Walk backwards until a usable snapshot is found.
	for snap == nil {
		// If an in-memory snapshot was found, use that
		if s, ok := d.recents.Get(h); ok {
			snap = s.(*Snapshot)
			break
		}
		// If an on-disk checkpoint snapshot can be found, use that
		if height%checkpointInterval == 0 {
			if s, err := loadSnapshot(d.config, d.signatures, d.store, h); err == nil {
				log.WithFields(log.Fields{"func": "IsSealer", "number": height, "hash": h}).Warn("Loaded voting snapshot from disk")
				snap = s
				break
			} else {
				log.Warn("loadSnapshot:", err)
			}
		}

		// Reached genesis: build the initial snapshot from the
		// configured self-vote signers (deduplicated).
		if height == 0 {
			genesis, err := c.GetHeaderByHeight(0)
			if err != nil {
				return false, err
			}
			var genesisVotes []*Vote
			alreadyVote := make(map[string]struct{})
			for _, voter := range d.config.SelfVoteSigners {
				if _, ok := alreadyVote[voter]; !ok {
					genesisVotes = append(genesisVotes, &Vote{
						Voter:     voter,
						Candidate: voter,
						Stake:     0,
						//Stake: state.GetBalance(voter),
					})
					alreadyVote[voter] = struct{}{}
				}
			}
			snap = newSnapshot(d.config, d.signatures, genesis.Hash(), genesisVotes, defaultLoopCntRecalculateSigners)
			if err := snap.store(d.store); err != nil {
				return false, err
			}
			log.Info("Stored genesis voting snapshot to disk")
			break
		}

		// NOTE(review): this `:=` shadows the outer `header`, and the
		// lookup is by height (canonical chain) rather than by hash h.
		// Unlike Dpos.snapshot, the skipped headers are never collected
		// into `headers`, so the apply() below is effectively a no-op —
		// confirm whether intermediate headers should be applied here.
		header, err := c.GetHeaderByHeight(height)
		if header == nil || err != nil {
			return false, errors.New("unknown ancestor")
		}

		height, h = height-1, header.PreviousBlockHash
	}

	// `headers` is always empty here (see NOTE above), so this returns
	// the found snapshot unchanged.
	snap, err := snap.apply(headers)
	if err != nil {
		return false, err
	}

	d.recents.Add(snap.Hash, snap)

	if snap != nil {
		// Same slot computation as Snapshot.inturn: index into the
		// signer queue by time elapsed since the loop start.
		loopIndex := int((headerTime-snap.LoopStartTime)/snap.config.Period) % len(snap.Signers)
		if loopIndex >= len(snap.Signers) {
			return false, nil
		} else if *snap.Signers[loopIndex] != d.signer {
			return false, nil

		}
		return true, nil
	} else {
		// Unreachable in practice: apply() returns a non-nil snapshot
		// whenever it returns a nil error.
		return false, nil
	}
}
+
// snapshot retrieves the authorization snapshot at a given point in time.
// It walks the chain backwards from (number, hash), collecting the headers
// it passes, until it finds a snapshot in the in-memory cache, an on-disk
// checkpoint, or reaches genesis; the collected headers are then applied
// on top of that snapshot and the result is cached (and checkpointed).
func (d *Dpos) snapshot(c chain.Chain, number uint64, hash bc.Hash, parents []*types.BlockHeader, genesisVotes []*Vote, lcrs uint64) (*Snapshot, error) {

	var (
		headers []*types.BlockHeader
		snap    *Snapshot
	)
	h := hash

	for snap == nil {
		// If an in-memory snapshot was found, use that
		if s, ok := d.recents.Get(h); ok {
			snap = s.(*Snapshot)
			break
		}
		// If an on-disk checkpoint snapshot can be found, use that
		if number%checkpointInterval == 0 {
			if s, err := loadSnapshot(d.config, d.signatures, d.store, h); err == nil {
				log.WithFields(log.Fields{"number": number, "hash": h}).Warn("Loaded voting snapshot from disk")
				snap = s
				break
			}
		}
		// Reached genesis: verify it and build the initial snapshot
		// from the supplied genesis votes.
		if number == 0 {
			genesis, err := c.GetHeaderByHeight(0)
			if err != nil {
				return nil, err
			}
			if err := d.VerifyHeader(c, genesis, false); err != nil {
				return nil, err
			}

			snap = newSnapshot(d.config, d.signatures, genesis.Hash(), genesisVotes, lcrs)
			if err := snap.store(d.store); err != nil {
				return nil, err
			}
			log.Info("Stored genesis voting snapshot to disk")
			break
		}
		// No snapshot for this block yet: remember its header and step
		// to the parent. Prefer headers handed in by the caller.
		var header *types.BlockHeader
		if len(parents) > 0 {
			// Pop the newest explicit parent and sanity-check it links.
			header = parents[len(parents)-1]
			if header.Hash() != h || header.Height != number {
				return nil, errors.New("unknown ancestor")
			}
			parents = parents[:len(parents)-1]
		} else {
			// NOTE(review): database lookup is by height, not by hash h —
			// this assumes the canonical chain; confirm for side chains.
			var err error
			header, err = c.GetHeaderByHeight(number)
			if header == nil || err != nil {
				return nil, errors.New("unknown ancestor")
			}
		}
		headers = append(headers, header)
		number, h = number-1, header.PreviousBlockHash
	}

	// Previous snapshot found, apply any pending headers on top of it.
	// Headers were collected newest-first; reverse to ascending order.
	for i := 0; i < len(headers)/2; i++ {
		headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i]
	}
	snap, err := snap.apply(headers)
	if err != nil {
		return nil, err
	}
	d.recents.Add(snap.Hash, snap)

	// If we've generated a new checkpoint snapshot, save to disk
	if snap.Number%checkpointInterval == 0 && len(headers) > 0 {
		if err = snap.store(d.store); err != nil {
			return nil, err
		}
		log.Info("Stored voting snapshot to disk", "number", snap.Number, "hash", snap.Hash)
	}
	return snap, err
}
+
+// Get the signer missing from last signer till header.Coinbase
+func getSignerMissing(lastSigner string, currentSigner string, extra HeaderExtra) []string {
+
+ var signerMissing []string
+ recordMissing := false
+ for _, signer := range extra.SignerQueue {
+ if signer == lastSigner {
+ recordMissing = true
+ continue
+ }
+ if signer == currentSigner {
+ break
+ }
+ if recordMissing {
+ signerMissing = append(signerMissing, signer)
+ }
+ }
+ return signerMissing
+}
--- /dev/null
+package dpos
+
+import (
+ "bytes"
+ "sort"
+
+ "github.com/vapor/protocol/bc"
+)
+
// TallyItem pairs a candidate address with its (credit-weighted) stake.
type TallyItem struct {
	addr  string
	stake uint64
}

// TallySlice sorts tally items by stake in descending order, breaking
// ties by address, also in descending lexicographic order.
type TallySlice []TallyItem

func (s TallySlice) Len() int      { return len(s) }
func (s TallySlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s TallySlice) Less(i, j int) bool {
	a, b := s[i], s[j]
	// Reverse order: larger stakes sort first.
	if a.stake != b.stake {
		return a.stake > b.stake
	}
	// Equal stakes fall back to the address for a deterministic order.
	return a.addr > b.addr
}
+
// SignerItem pairs a signer address with a recent block hash used as a
// pseudo-random sort key when shuffling the signer queue.
type SignerItem struct {
	addr string
	hash bc.Hash
}

// SignerSlice orders signer items by their hash bytes in descending
// order, producing a deterministic but hard-to-predict shuffle.
type SignerSlice []SignerItem

func (s SignerSlice) Len() int      { return len(s) }
func (s SignerSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SignerSlice) Less(i, j int) bool {
	return bytes.Compare(s[i].hash.Bytes(), s[j].hash.Bytes()) > 0
}
+
+// verify the SignerQueue base on block hash
+func (s *Snapshot) verifySignerQueue(signerQueue []string) error {
+
+ if len(signerQueue) > int(s.config.MaxSignerCount) {
+ return errInvalidSignerQueue
+ }
+ sq, err := s.createSignerQueue()
+ if err != nil {
+ return err
+ }
+ if len(sq) == 0 || len(sq) != len(signerQueue) {
+ return errInvalidSignerQueue
+ }
+ for i, signer := range signerQueue {
+ if signer != sq[i] {
+ return errInvalidSignerQueue
+ }
+ }
+
+ return nil
+}
+
+func (s *Snapshot) buildTallySlice() TallySlice {
+ var tallySlice TallySlice
+ for address, stake := range s.Tally {
+ if !candidateNeedPD || s.isCandidate(address) {
+ if _, ok := s.Punished[address]; ok {
+ var creditWeight uint64
+ if s.Punished[address] > defaultFullCredit-minCalSignerQueueCredit {
+ creditWeight = minCalSignerQueueCredit
+ } else {
+ creditWeight = defaultFullCredit - s.Punished[address]
+ }
+ tallySlice = append(tallySlice, TallyItem{address, stake * creditWeight})
+ } else {
+ tallySlice = append(tallySlice, TallyItem{address, stake * defaultFullCredit})
+ }
+ }
+ }
+ return tallySlice
+}
+
// createSignerQueue builds the signer queue for the next loop. It may
// only be called at the end of a loop with the snapshot's own hash as
// the newest history entry. Every LCRS loops the queue is recalculated
// from the stake tally; on other loop boundaries the current signers are
// merely reshuffled pseudo-randomly using recent block hashes.
func (s *Snapshot) createSignerQueue() ([]string, error) {

	if (s.Number+1)%s.config.MaxSignerCount != 0 || s.Hash != s.HistoryHash[len(s.HistoryHash)-1] {
		return nil, errCreateSignerQueueNotAllowed
	}

	var signerSlice SignerSlice
	var topStakeAddress []string
	if (s.Number+1)%(s.config.MaxSignerCount*s.LCRS) == 0 {
		// before recalculate the signers, clear the candidate is not in snap.Candidates

		// only recalculate signers from to tally per 10 loop,
		// other loop end just reset the order of signers by block hash (nearly random)
		tallySlice := s.buildTallySlice()
		sort.Sort(TallySlice(tallySlice))
		queueLength := int(s.config.MaxSignerCount)
		if queueLength > len(tallySlice) {
			queueLength = len(tallySlice)
		}
		// Official 21-signer layout with more than 30 candidates: fill
		// the queue in stake tiers — the top 10 always enter, then 6 of
		// the next 10 (60%), 4 of the following 10 (40%), and 1 chosen
		// from the remainder, each tier shuffled by history hash.
		if queueLength == defaultOfficialMaxSignerCount && len(tallySlice) > defaultOfficialThirdLevelCount {
			for i, tallyItem := range tallySlice[:defaultOfficialFirstLevelCount] {
				signerSlice = append(signerSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]})
			}
			var signerSecondLevelSlice, signerThirdLevelSlice, signerLastLevelSlice SignerSlice
			// 60%
			for i, tallyItem := range tallySlice[defaultOfficialFirstLevelCount:defaultOfficialSecondLevelCount] {
				signerSecondLevelSlice = append(signerSecondLevelSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]})
			}
			sort.Sort(SignerSlice(signerSecondLevelSlice))
			signerSlice = append(signerSlice, signerSecondLevelSlice[:6]...)
			// 40%
			for i, tallyItem := range tallySlice[defaultOfficialSecondLevelCount:defaultOfficialThirdLevelCount] {
				signerThirdLevelSlice = append(signerThirdLevelSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]})
			}
			sort.Sort(SignerSlice(signerThirdLevelSlice))
			signerSlice = append(signerSlice, signerThirdLevelSlice[:4]...)
			// choose 1 from last
			for i, tallyItem := range tallySlice[defaultOfficialThirdLevelCount:] {
				signerLastLevelSlice = append(signerLastLevelSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]})
			}
			sort.Sort(SignerSlice(signerLastLevelSlice))
			signerSlice = append(signerSlice, signerLastLevelSlice[0])

		} else {
			// Fewer candidates than the official layout needs: take the
			// top queueLength by weighted stake.
			for i, tallyItem := range tallySlice[:queueLength] {
				signerSlice = append(signerSlice, SignerItem{tallyItem.addr, s.HistoryHash[len(s.HistoryHash)-1-i]})
			}

		}

	} else {
		// Not a recalculation loop: keep the current signers and only
		// reshuffle them via the history-hash sort below.
		for i, signer := range s.Signers {
			signerSlice = append(signerSlice, SignerItem{*signer, s.HistoryHash[len(s.HistoryHash)-1-i]})
		}
	}

	sort.Sort(SignerSlice(signerSlice))
	// Set the top candidates in random order base on block hash
	if len(signerSlice) == 0 {
		return nil, errSignerQueueEmpty
	}
	// Wrap around when there are fewer signers than MaxSignerCount slots.
	for i := 0; i < int(s.config.MaxSignerCount); i++ {
		topStakeAddress = append(topStakeAddress, signerSlice[i%len(signerSlice)].addr)
	}

	return topStakeAddress, nil

}
--- /dev/null
+package dpos
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "sort"
+ "time"
+
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/vapor/common"
+ "github.com/vapor/config"
+ "github.com/vapor/consensus"
+ "github.com/vapor/crypto"
+ "github.com/vapor/crypto/ed25519/chainkd"
+ "github.com/vapor/protocol"
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+)
+
// Credit and signer-queue tuning parameters for the DPoS engine.
const (
	defaultFullCredit               = 1000 // no punished
	missingPublishCredit            = 100  // punished for missing one block seal
	signRewardCredit                = 10   // seal one block
	autoRewardCredit                = 1    // credit auto recover for each block
	minCalSignerQueueCredit         = 300  // when calculate the signerQueue
	defaultOfficialMaxSignerCount   = 21   // official max signer count
	defaultOfficialFirstLevelCount  = 10   // official first level , 100% in signer queue
	defaultOfficialSecondLevelCount = 20   // official second level, 60% in signer queue
	defaultOfficialThirdLevelCount  = 30   // official third level, 40% in signer queue
	// the credit of one signer is at least minCalSignerQueueCredit
	candidateStateNormal = 1
	candidateMaxLen      = 500 // if candidateNeedPD is false and candidate is more than candidateMaxLen, then minimum tickets candidates will be remove in each LCRS*loop
)

// errIncorrectTallyCount signals that the tally recomputed from the vote
// set disagrees with the cached Tally map.
// NOTE(review): currently never returned — verifyTallyCnt only prints
// mismatches; confirm whether it was meant to return this error.
var errIncorrectTallyCount = errors.New("incorrect tally count")
+
// Snapshot is the state of the DPoS voting system at a given block: the
// signer queue, vote tallies, punishments, confirmations and pending
// proposals needed to validate and produce subsequent blocks. It is
// JSON-serialized for on-disk checkpoints (unexported fields are
// re-attached on load).
type Snapshot struct {
	config   *config.DposConfig // Consensus engine configuration parameters
	sigcache *lru.ARCCache      // Cache of recent block signatures to speed up ecrecover
	LCRS     uint64             // Loop count to recreate signers from top tally

	Period          uint64                `json:"period"`           // Period of seal each block
	Number          uint64                `json:"number"`           // Block Number where the snapshot was created
	ConfirmedNumber uint64                `json:"confirmed_number"` // Block Number confirmed when the snapshot was created
	Hash            bc.Hash               `json:"hash"`             // Block hash where the snapshot was created
	HistoryHash     []bc.Hash             `json:"historyHash"`      // Block hash list for two recent loop
	Signers         []*string             `json:"signers"`          // Signers queue in current header
	Votes           map[string]*Vote      `json:"votes"`            // All validate votes from genesis block
	Tally           map[string]uint64     `json:"tally"`            // Stake for each candidate address
	Voters          map[string]uint64     `json:"voters"`           // Block height for each voter address
	Candidates      map[string]uint64     `json:"candidates"`       // Candidates for Signers (0- adding procedure 1- normal 2- removing procedure)
	Punished        map[string]uint64     `json:"punished"`         // The signer be punished count cause of missing seal
	Confirmations   map[uint64][]string   `json:"confirms"`         // The signer confirm given block height
	Proposals       map[bc.Hash]*Proposal `json:"proposals"`        // The Proposals going or success (failed proposal will be removed)
	HeaderTime      uint64                `json:"headerTime"`       // Time of the current header
	LoopStartTime   uint64                `json:"loopStartTime"`    // Start Time of the current loop
}
+
+// newSnapshot creates a new snapshot with the specified startup parameters. only ever use if for
+// the genesis block.
+func newSnapshot(config *config.DposConfig, sigcache *lru.ARCCache, hash bc.Hash, votes []*Vote, lcrs uint64) *Snapshot {
+
+ snap := &Snapshot{
+ config: config,
+ sigcache: sigcache,
+ LCRS: lcrs,
+ Period: config.Period,
+ Number: 0,
+ ConfirmedNumber: 0,
+ Hash: hash,
+ HistoryHash: []bc.Hash{},
+ Signers: []*string{},
+ Votes: make(map[string]*Vote),
+ Tally: make(map[string]uint64),
+ Voters: make(map[string]uint64),
+ Punished: make(map[string]uint64),
+ Candidates: make(map[string]uint64),
+ Confirmations: make(map[uint64][]string),
+ Proposals: make(map[bc.Hash]*Proposal),
+ HeaderTime: uint64(time.Now().Unix()) - 1,
+ LoopStartTime: config.GenesisTimestamp,
+ }
+ snap.HistoryHash = append(snap.HistoryHash, hash)
+ for _, vote := range votes {
+ // init Votes from each vote
+ snap.Votes[vote.Voter] = vote
+ // init Tally
+ _, ok := snap.Tally[vote.Candidate]
+ if !ok {
+ snap.Tally[vote.Candidate] = 0
+ }
+ fmt.Println("newSnapshot", vote.Candidate, vote.Stake)
+ fmt.Println(snap.Tally)
+ snap.Tally[vote.Candidate] += vote.Stake
+ // init Voters
+ snap.Voters[vote.Voter] = 0 // block height is 0 , vote in genesis block
+ // init Candidates
+ snap.Candidates[vote.Voter] = candidateStateNormal
+ }
+
+ for i := 0; i < int(config.MaxSignerCount); i++ {
+ snap.Signers = append(snap.Signers, &config.SelfVoteSigners[i%len(config.SelfVoteSigners)])
+ }
+
+ return snap
+}
+
+// loadSnapshot loads an existing snapshot from the database.
+func loadSnapshot(config *config.DposConfig, sigcache *lru.ARCCache, store protocol.Store, hash bc.Hash) (*Snapshot, error) {
+ data, err := store.Get(&hash)
+ if err != nil {
+ return nil, err
+ }
+ snap := new(Snapshot)
+ if err := json.Unmarshal(data, snap); err != nil {
+ return nil, err
+ }
+ snap.config = config
+ snap.sigcache = sigcache
+ return snap, nil
+}
+
+// store inserts the snapshot into the database.
+func (s *Snapshot) store(store protocol.Store) error {
+ data, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ return store.Set(&s.Hash, data)
+}
+
// copy creates a deep copy of the snapshot, though not the individual votes:
// Vote structs are re-created, but Proposal copies are delegated to
// Proposal.copy. The unexported config/sigcache pointers are shared.
func (s *Snapshot) copy() *Snapshot {
	cpy := &Snapshot{
		config:          s.config,
		sigcache:        s.sigcache,
		LCRS:            s.LCRS,
		Period:          s.Period,
		Number:          s.Number,
		ConfirmedNumber: s.ConfirmedNumber,
		Hash:            s.Hash,
		HistoryHash:     make([]bc.Hash, len(s.HistoryHash)),

		Signers:       make([]*string, len(s.Signers)),
		Votes:         make(map[string]*Vote),
		Tally:         make(map[string]uint64),
		Voters:        make(map[string]uint64),
		Candidates:    make(map[string]uint64),
		Punished:      make(map[string]uint64),
		Proposals:     make(map[bc.Hash]*Proposal),
		Confirmations: make(map[uint64][]string),

		HeaderTime:    s.HeaderTime,
		LoopStartTime: s.LoopStartTime,
	}
	copy(cpy.HistoryHash, s.HistoryHash)
	// NOTE: Signers copies the pointers, not the strings — the queue is
	// rebuilt wholesale in apply(), so sharing is safe there.
	copy(cpy.Signers, s.Signers)
	for voter, vote := range s.Votes {
		cpy.Votes[voter] = &Vote{
			Voter:     vote.Voter,
			Candidate: vote.Candidate,
			Stake:     vote.Stake,
		}
	}
	for candidate, tally := range s.Tally {
		cpy.Tally[candidate] = tally
	}
	for voter, number := range s.Voters {
		cpy.Voters[voter] = number
	}
	for candidate, state := range s.Candidates {
		cpy.Candidates[candidate] = state
	}
	for signer, cnt := range s.Punished {
		cpy.Punished[signer] = cnt
	}
	// Confirmation lists are value slices; clone each one.
	for blockNumber, confirmers := range s.Confirmations {
		cpy.Confirmations[blockNumber] = make([]string, len(confirmers))
		copy(cpy.Confirmations[blockNumber], confirmers)
	}
	for txHash, proposal := range s.Proposals {
		cpy.Proposals[txHash] = proposal.copy()
	}

	return cpy
}
+
+// apply creates a new authorization snapshot by applying the given headers to
+// the original one.
+func (s *Snapshot) apply(headers []*types.BlockHeader) (*Snapshot, error) {
+ // Allow passing in no headers for cleaner code
+ if len(headers) == 0 {
+ return s, nil
+ }
+ // Sanity check that the headers can be applied
+ for i := 0; i < len(headers)-1; i++ {
+ if headers[i+1].Height != headers[i].Height+1 {
+ return nil, errInvalidVotingChain
+ }
+ }
+ if headers[0].Height != s.Number+1 {
+ return nil, errInvalidVotingChain
+ }
+ // Iterate through the headers and create a new snapshot
+ snap := s.copy()
+ for _, header := range headers {
+
+ // Resolve the authorization key and check against signers
+ coinbase, err := ecrecover(header, s.sigcache, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ xpub := &chainkd.XPub{}
+ xpub.UnmarshalText(header.Coinbase)
+ derivedPK := xpub.PublicKey()
+ pubHash := crypto.Ripemd160(derivedPK)
+ address, err := common.NewAddressWitnessPubKeyHash(pubHash, &consensus.ActiveNetParams)
+ if err != nil {
+ return nil, err
+ }
+ if coinbase != address.EncodeAddress() {
+ return nil, errUnauthorized
+ }
+
+ headerExtra := HeaderExtra{}
+ if err := json.Unmarshal(header.Extra[extraVanity:len(header.Extra)-extraSeal], &headerExtra); err != nil {
+ return nil, err
+ }
+
+ snap.HeaderTime = header.Timestamp
+ snap.LoopStartTime = headerExtra.LoopStartTime
+ snap.Signers = nil
+ for i := range headerExtra.SignerQueue {
+ snap.Signers = append(snap.Signers, &headerExtra.SignerQueue[i])
+ }
+
+ snap.ConfirmedNumber = headerExtra.ConfirmedBlockNumber
+
+ if len(snap.HistoryHash) >= int(s.config.MaxSignerCount)*2 {
+ snap.HistoryHash = snap.HistoryHash[1 : int(s.config.MaxSignerCount)*2]
+ }
+
+ snap.HistoryHash = append(snap.HistoryHash, header.Hash())
+
+ // deal the new confirmation in this block
+ snap.updateSnapshotByConfirmations(headerExtra.CurrentBlockConfirmations)
+
+ // deal the new vote from voter
+ snap.updateSnapshotByVotes(headerExtra.CurrentBlockVotes, header.Height)
+
+ // deal the voter which balance modified
+ //snap.updateSnapshotByMPVotes(headerExtra.ModifyPredecessorVotes)
+
+ // deal the snap related with punished
+ //snap.updateSnapshotForPunish(headerExtra.SignerMissing, header.Height, header.Coinbase)
+
+ // deal proposals
+ snap.updateSnapshotByProposals(headerExtra.CurrentBlockProposals, header.Height)
+
+ // deal declares
+ snap.updateSnapshotByDeclares(headerExtra.CurrentBlockDeclares, header.Height)
+
+ // calculate proposal result
+ snap.calculateProposalResult(header.Height)
+
+ // check the len of candidate if not candidateNeedPD
+ if !candidateNeedPD && (snap.Number+1)%(snap.config.MaxSignerCount*snap.LCRS) == 0 && len(snap.Candidates) > candidateMaxLen {
+ snap.removeExtraCandidate()
+ }
+
+ }
+ snap.Number += uint64(len(headers))
+ snap.Hash = headers[len(headers)-1].Hash()
+ fmt.Println("updateSnapshotForExpired before", snap.Tally)
+ snap.updateSnapshotForExpired()
+ fmt.Println("updateSnapshotForExpired after", snap.Tally)
+ err := snap.verifyTallyCnt()
+ if err != nil {
+ return nil, err
+ }
+ return snap, nil
+}
+
+func (s *Snapshot) removeExtraCandidate() {
+ // remove minimum tickets tally beyond candidateMaxLen
+ fmt.Println("removeExtraCandidate")
+ tallySlice := s.buildTallySlice()
+ sort.Sort(TallySlice(tallySlice))
+ if len(tallySlice) > candidateMaxLen {
+ removeNeedTally := tallySlice[candidateMaxLen:]
+ for _, tallySlice := range removeNeedTally {
+ delete(s.Candidates, tallySlice.addr)
+ }
+ }
+}
+
// verifyTallyCnt recomputes each candidate's stake from the recorded
// votes and compares it with the cached Tally map.
// NOTE(review): mismatches are only printed, never returned as an error —
// errIncorrectTallyCount exists for exactly this case, but returning it
// would fail apply() whenever updateSnapshotForExpired has subtracted
// stake while leaving the expired votes in place (its cleanup is still a
// TODO). Confirm intended behavior before tightening this.
func (s *Snapshot) verifyTallyCnt() error {

	// Rebuild the expected tally: sum each voter's stake per candidate.
	tallyTarget := make(map[string]uint64)
	for _, v := range s.Votes {
		if _, ok := tallyTarget[v.Candidate]; ok {
			tallyTarget[v.Candidate] = tallyTarget[v.Candidate] + v.Stake
		} else {
			tallyTarget[v.Candidate] = v.Stake
		}
	}
	for address, tally := range s.Tally {
		if targetTally, ok := tallyTarget[address]; ok && targetTally == tally {
			continue
		} else {
			// Mismatch (or candidate with no backing vote): log only.
			fmt.Println(address, "not find in votes")
		}
	}

	return nil
}
+
+func (s *Snapshot) updateSnapshotByDeclares(declares []Declare, headerHeight uint64) {
+ for _, declare := range declares {
+ if proposal, ok := s.Proposals[declare.ProposalHash]; ok {
+ // check the proposal enable status and valid block number
+ if proposal.ReceivedNumber+proposal.ValidationLoopCnt*s.config.MaxSignerCount < headerHeight || !s.isCandidate(declare.Declarer) {
+ continue
+ }
+ // check if this signer already declare on this proposal
+ alreadyDeclare := false
+ for _, v := range proposal.Declares {
+ if v.Declarer == declare.Declarer {
+ // this declarer already declare for this proposal
+ alreadyDeclare = true
+ break
+ }
+ }
+ if alreadyDeclare {
+ continue
+ }
+ // add declare to proposal
+ s.Proposals[declare.ProposalHash].Declares = append(s.Proposals[declare.ProposalHash].Declares,
+ &Declare{declare.ProposalHash, declare.Declarer, declare.Decision})
+
+ }
+ }
+}
+
+func (s *Snapshot) calculateProposalResult(headerHeight uint64) {
+
+ for hashKey, proposal := range s.Proposals {
+ // the result will be calculate at receiverdNumber + vlcnt + 1
+ if proposal.ReceivedNumber+proposal.ValidationLoopCnt*s.config.MaxSignerCount+1 == headerHeight {
+ // calculate the current stake of this proposal
+ judegmentStake := big.NewInt(0)
+ for _, tally := range s.Tally {
+ judegmentStake.Add(judegmentStake, new(big.Int).SetUint64(tally))
+ }
+ judegmentStake.Mul(judegmentStake, big.NewInt(2))
+ judegmentStake.Div(judegmentStake, big.NewInt(3))
+ // calculate declare stake
+ yesDeclareStake := big.NewInt(0)
+ for _, declare := range proposal.Declares {
+ if declare.Decision {
+ if _, ok := s.Tally[declare.Declarer]; ok {
+ yesDeclareStake.Add(yesDeclareStake, new(big.Int).SetUint64(s.Tally[declare.Declarer]))
+ }
+ }
+ }
+ if yesDeclareStake.Cmp(judegmentStake) > 0 {
+ // process add candidate
+ switch proposal.ProposalType {
+ case proposalTypeCandidateAdd:
+ if candidateNeedPD {
+ s.Candidates[s.Proposals[hashKey].Candidate] = candidateStateNormal
+ }
+ case proposalTypeCandidateRemove:
+ if _, ok := s.Candidates[proposal.Candidate]; ok && candidateNeedPD {
+ delete(s.Candidates, proposal.Candidate)
+ }
+ case proposalTypeMinerRewardDistributionModify:
+ minerRewardPerThousand = s.Proposals[hashKey].MinerRewardPerThousand
+
+ }
+ }
+
+ }
+
+ }
+
+}
+
+func (s *Snapshot) updateSnapshotByProposals(proposals []Proposal, headerHeight uint64) {
+ for _, proposal := range proposals {
+ proposal.ReceivedNumber = headerHeight
+ s.Proposals[proposal.Hash] = &proposal
+ }
+}
+
// updateSnapshotForExpired retracts votes older than one Epoch from the
// tally (but only if enough voters remain to fill the signer queue) and
// prunes confirmations older than MaxSignerCount blocks.
// NOTE(review): expired votes are subtracted from the Tally but the
// Votes/Voters entries themselves are NOT deleted (the cleanup below is
// commented out as TODO) — so an expired vote is re-subtracted on every
// subsequent call. Confirm this is intentional before relying on Tally.
func (s *Snapshot) updateSnapshotForExpired() {

	// deal the expired vote
	var expiredVotes []*Vote
	for voterAddress, voteNumber := range s.Voters {
		if s.Number-voteNumber > s.config.Epoch {
			// clear the vote
			if expiredVote, ok := s.Votes[voterAddress]; ok {
				expiredVotes = append(expiredVotes, expiredVote)
			}
		}
	}
	// remove expiredVotes only enough voters left
	if uint64(len(s.Voters)-len(expiredVotes)) >= s.config.MaxSignerCount {
		for _, expiredVote := range expiredVotes {
			s.Tally[expiredVote.Candidate] -= expiredVote.Stake
			// TODO
			/*
				if s.Tally[expiredVote.Candidate] == 0 {
					delete(s.Tally, expiredVote.Candidate)
				}
				delete(s.Votes, expiredVote.Voter)
				delete(s.Voters, expiredVote.Voter)
			*/
		}
	}

	// deal the expired confirmation
	// (deleting from a map while ranging over it is safe in Go)
	for blockNumber := range s.Confirmations {
		if s.Number-blockNumber > s.config.MaxSignerCount {
			delete(s.Confirmations, blockNumber)
		}
	}

	// TODO
	/*
		// remove 0 stake tally
		for address, tally := range s.Tally {
			if tally <= 0 {
				delete(s.Tally, address)
			}
		}
	*/
}
+
+func (s *Snapshot) updateSnapshotByConfirmations(confirmations []Confirmation) {
+ for _, confirmation := range confirmations {
+ _, ok := s.Confirmations[confirmation.BlockNumber]
+ if !ok {
+ s.Confirmations[confirmation.BlockNumber] = []string{}
+ }
+ addConfirmation := true
+ for _, address := range s.Confirmations[confirmation.BlockNumber] {
+ if confirmation.Signer == address {
+ addConfirmation = false
+ break
+ }
+ }
+ if addConfirmation == true {
+ s.Confirmations[confirmation.BlockNumber] = append(s.Confirmations[confirmation.BlockNumber], confirmation.Signer)
+ }
+ }
+}
+
+func (s *Snapshot) updateSnapshotByVotes(votes []Vote, headerHeight uint64) {
+ fmt.Println("updateSnapshotByVotes start")
+ for _, vote := range votes {
+ // update Votes, Tally, Voters data
+ if lastVote, ok := s.Votes[vote.Voter]; ok {
+ fmt.Println("lastVote.Candidate:", lastVote.Candidate)
+ fmt.Println("lastVote.Stake:", lastVote.Stake)
+ fmt.Println(s.Tally[lastVote.Candidate]-lastVote.Stake, s.Tally[lastVote.Candidate])
+ s.Tally[lastVote.Candidate] = s.Tally[lastVote.Candidate] - lastVote.Stake
+ fmt.Println(s.Tally)
+ fmt.Println(s.Tally[lastVote.Candidate])
+ }
+ if _, ok := s.Tally[vote.Candidate]; ok {
+ fmt.Println("vote.Candidate:", vote.Candidate)
+ fmt.Println("vote.Stake:", vote.Stake)
+ s.Tally[vote.Candidate] = s.Tally[vote.Candidate] + vote.Stake
+ } else {
+ fmt.Println("111 vote.Candidate:", vote.Candidate)
+ fmt.Println("111 vote.Stake:", vote.Stake)
+ s.Tally[vote.Candidate] = vote.Stake
+ if !candidateNeedPD {
+ s.Candidates[vote.Candidate] = candidateStateNormal
+ }
+ }
+ s.Votes[vote.Voter] = &Vote{vote.Voter, vote.Candidate, vote.Stake}
+ s.Voters[vote.Voter] = headerHeight
+ }
+ fmt.Println(votes)
+ fmt.Println(s.Tally)
+ fmt.Println("updateSnapshotByVotes end")
+}
+
// updateSnapshotByMPVotes re-weights existing votes whose voter balance
// changed: the voter's old stake is swapped for the new stake on the
// candidate it already voted for (the vote target is unchanged, and the
// voter's recorded height is deliberately not touched).
// NOTE(review): the only call site (in apply) is commented out, and the
// fmt prints below are debug leftovers — candidates for removal once the
// feature is re-enabled.
func (s *Snapshot) updateSnapshotByMPVotes(votes []Vote) {
	fmt.Println("8888888888888888888888888888888888")
	fmt.Println(s.Tally)
	for _, txVote := range votes {
		fmt.Println("updateSnapshotByMPVotesupdateSnapshotByMPVotesupdateSnapshotByMPVotes")
		if lastVote, ok := s.Votes[txVote.Voter]; ok {
			fmt.Println("txVote.Voter:", txVote.Voter)
			fmt.Println("lastVote.Candidate:", lastVote.Candidate, ",lastVote.Stake:", lastVote.Stake)
			fmt.Println("txVote.Stake:", txVote.Stake)
			// Swap the old stake for the new one on the same candidate.
			s.Tally[lastVote.Candidate] = s.Tally[lastVote.Candidate] - lastVote.Stake
			s.Tally[lastVote.Candidate] = s.Tally[lastVote.Candidate] + txVote.Stake
			s.Votes[txVote.Voter] = &Vote{Voter: txVote.Voter, Candidate: lastVote.Candidate, Stake: txVote.Stake}
			fmt.Println(txVote.Voter, lastVote.Candidate, txVote.Stake)
			// do not modify header number of snap.Voters
		}
	}
	fmt.Println(s.Tally)
	fmt.Println("999999999999999999999999999999999")
}
+
// updateSnapshotForPunish adjusts the punishment counters after a block:
// each signer that missed its slot accrues missingPublishCredit, the
// block's sealer (coinbase) recovers signRewardCredit, and every punished
// signer recovers autoRewardCredit per block. Entries that would reach
// zero are deleted.
// NOTE(review): the only call site (in apply) is commented out.
func (s *Snapshot) updateSnapshotForPunish(signerMissing []string, headerNumber uint64, coinbase string) {
	// set punished count to half of origin in Epoch
	/*
		if headerNumber.Uint64()%s.config.Epoch == 0 {
			for bePublished := range s.Punished {
				if count := s.Punished[bePublished] / 2; count > 0 {
					s.Punished[bePublished] = count
				} else {
					delete(s.Punished, bePublished)
				}
			}
		}
	*/
	// punish the missing signer
	for _, signerMissing := range signerMissing {
		if _, ok := s.Punished[signerMissing]; ok {
			s.Punished[signerMissing] += missingPublishCredit
		} else {
			s.Punished[signerMissing] = missingPublishCredit
		}
	}
	// reduce the punish of sign signer
	if _, ok := s.Punished[coinbase]; ok {

		if s.Punished[coinbase] > signRewardCredit {
			s.Punished[coinbase] -= signRewardCredit
		} else {
			delete(s.Punished, coinbase)
		}
	}
	// reduce the punish for all punished
	// (deleting from a map while ranging over it is safe in Go)
	for signerEach := range s.Punished {
		if s.Punished[signerEach] > autoRewardCredit {
			s.Punished[signerEach] -= autoRewardCredit
		} else {
			delete(s.Punished, signerEach)
		}
	}
}
+
+// inturn returns if a signer at a given block height is in-turn or not.
+func (s *Snapshot) inturn(signer string, headerTime uint64) bool {
+ for _, addr := range s.Signers {
+ fmt.Println("inturn [addr]:", *addr)
+ }
+ fmt.Println("signer:", signer)
+ // if all node stop more than period of one loop
+ loopIndex := int((headerTime-s.LoopStartTime)/s.config.Period) % len(s.Signers)
+ fmt.Println(headerTime-s.LoopStartTime, s.config.Period, len(s.Signers), loopIndex)
+ if loopIndex >= len(s.Signers) {
+ return false
+ } else if *s.Signers[loopIndex] != signer {
+ return false
+
+ }
+ return true
+}
+
+// check if address belong to voter
+func (s *Snapshot) isVoter(address string) bool {
+ if _, ok := s.Voters[address]; ok {
+ return true
+ }
+ return false
+}
+
+// check if address belong to candidate
+func (s *Snapshot) isCandidate(address string) bool {
+ if _, ok := s.Candidates[address]; ok {
+ return true
+ }
+ return false
+}
+
// getLastConfirmedBlockNumber returns the most recent block height that
// more than 2/3 of MaxSignerCount signers have confirmed, scanning the
// snapshot's confirmations merged with the ones carried by the current
// header. If no height qualifies within the scan window, the window's
// lower bound is returned.
func (s *Snapshot) getLastConfirmedBlockNumber(confirmations []Confirmation) *big.Int {

	// Work on a copy so the snapshot's own confirmations stay untouched.
	cpyConfirmations := make(map[uint64][]string)
	for blockNumber, confirmers := range s.Confirmations {
		cpyConfirmations[blockNumber] = make([]string, len(confirmers))
		copy(cpyConfirmations[blockNumber], confirmers)
	}
	// update confirmation into snapshot (deduplicated per height)
	for _, confirmation := range confirmations {
		_, ok := cpyConfirmations[confirmation.BlockNumber]
		if !ok {
			cpyConfirmations[confirmation.BlockNumber] = []string{}
		}
		addConfirmation := true
		for _, address := range cpyConfirmations[confirmation.BlockNumber] {
			if confirmation.Signer == address {
				addConfirmation = false
				break
			}
		}
		if addConfirmation == true {

			cpyConfirmations[confirmation.BlockNumber] = append(cpyConfirmations[confirmation.BlockNumber], confirmation.Signer)
		}
	}

	// Scan backwards over the last ~2/3 of a signer loop for a height
	// confirmed by a supermajority (> 2/3 of MaxSignerCount).
	// NOTE(review): the loop bound underflows (uint64) when s.Number is
	// smaller than MaxSignerCount*2/3-1; the wrap makes the loop body
	// never run and s.Number is returned — confirm this early-chain
	// behavior is intended.
	i := s.Number
	for ; i > s.Number-s.config.MaxSignerCount*2/3+1; i-- {
		if confirmers, ok := cpyConfirmations[i]; ok {
			if len(confirmers) > int(s.config.MaxSignerCount*2/3) {
				return big.NewInt(int64(i))
			}
		}
	}
	return big.NewInt(int64(i))
}
blockHeaderPrefix = []byte("BH:")
txStatusPrefix = []byte("BTS:")
cliamTxPreFix = []byte("Claim:")
+ dposPreFix = []byte("dpos:")
)
func loadBlockStoreStateJSON(db dbm.DB) *protocol.BlockStoreState {
return append(cliamTxPreFix, hash.Bytes()...)
}
// calcDPosKey builds the database key for a DPoS snapshot:
// the "dpos:" prefix followed by the block hash bytes.
func calcDPosKey(hash *bc.Hash) []byte {
	return append(dposPreFix, hash.Bytes()...)
}
+
// GetBlock return the block by given hash
func GetBlock(db dbm.DB, hash *bc.Hash) *types.Block {
bytez := db.Get(calcBlockKey(hash))
batch.Set(calcClaimTxKey(hash), []byte("1"))
batch.Write()
}
+
// Set persists a DPoS snapshot blob under its block hash.
// It always returns nil: the underlying batch Write does not surface
// errors through this interface.
func (s *Store) Set(hash *bc.Hash, data []byte) error {
	batch := s.db.NewBatch()
	batch.Set(calcDPosKey(hash), data)
	batch.Write()
	return nil
}
+
+func (s *Store) Get(hash *bc.Hash) ([]byte, error) {
+ data := s.db.Get(calcDPosKey(hash))
+ if data == nil {
+ return nil, errors.New("can't find the snapshot by given hash")
+ }
+ return data, nil
+}
if data == nil {
continue
}
-
var utxo storage.UtxoEntry
if err := proto.Unmarshal(data, &utxo); err != nil {
return errors.Wrap(err, "unmarshaling utxo entry")
+++ /dev/null
-package consensus
-
-import (
- "math/big"
-
- "github.com/vapor/common"
- "github.com/vapor/protocol/bc"
- "github.com/vapor/protocol/bc/types"
-)
-
-// ChainReader defines a small collection of methods needed to access the local
-// blockchain during header and/or uncle verification.
-type ChainReader interface {
- // Config retrieves the blockchain's chain configuration.
- //Config() *params.ChainConfig
-
- // CurrentHeader retrieves the current header from the local chain.
- CurrentHeader() *types.BlockHeader
-
- // GetHeader retrieves a block header from the database by hash and number.
- GetHeader(hash bc.Hash, number uint64) *types.BlockHeader
-
- // GetHeaderByNumber retrieves a block header from the database by number.
- GetHeaderByNumber(number uint64) *types.BlockHeader
-
- // GetHeaderByHash retrieves a block header from the database by its hash.
- GetHeaderByHash(hash bc.Hash) *types.BlockHeader
-
- // GetBlock retrieves a block from the database by hash and number.
- GetBlock(hash bc.Hash, number uint64) *types.Block
-}
-
-// Engine is an algorithm agnostic consensus engine.
-type Engine interface {
- // Author retrieves the Ethereum address of the account that minted the given
- // block, which may be different from the header's coinbase if a consensus
- // engine is based on signatures.
- Author(header *types.BlockHeader) (common.Address, error)
-
- // VerifyHeader checks whether a header conforms to the consensus rules of a
- // given engine. Verifying the seal may be done optionally here, or explicitly
- // via the VerifySeal method.
- VerifyHeader(chain ChainReader, header *types.BlockHeader, seal bool) error
-
- // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
- // concurrently. The method returns a quit channel to abort the operations and
- // a results channel to retrieve the async verifications (the order is that of
- // the input slice).
- VerifyHeaders(chain ChainReader, headers []*types.BlockHeader, seals []bool) (chan<- struct{}, <-chan error)
-
- // VerifyUncles verifies that the given block's uncles conform to the consensus
- // rules of a given engine.
- VerifyUncles(chain ChainReader, block *types.Block) error
-
- // VerifySeal checks whether the crypto seal on a header is valid according to
- // the consensus rules of the given engine.
- VerifySeal(chain ChainReader, header *types.BlockHeader) error
-
- // Prepare initializes the consensus fields of a block header according to the
- // rules of a particular engine. The changes are executed inline.
- Prepare(chain ChainReader, header *types.BlockHeader) error
-
- // Finalize runs any post-transaction state modifications (e.g. block rewards)
- // and assembles the final block.
- // Note: The block header and state database might be updated to reflect any
- // consensus rules that happen at finalization (e.g. block rewards).
- /*
- Finalize(chain ChainReader, header *types.BlockHeader , state *state.StateDB, txs []*types.Transaction,
- uncles []*types.BlockHeader , receipts []*types.Receipt) (*types.Block, error)
- */
-
- // Seal generates a new block for the given input block with the local miner's
- // seal place on top.
- Seal(chain ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error)
-
- // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
- // that a new block should have.
- CalcDifficulty(chain ChainReader, time uint64, parent *types.BlockHeader) *big.Int
-
- // APIs returns the RPC APIs this consensus engine provides.
- //APIs(chain ChainReader) []rpc.API
-}
+++ /dev/null
-package dpos
+++ /dev/null
-package dpos
-
-import (
- "errors"
- "fmt"
- "math/big"
- "sync"
- "time"
-
- "github.com/go-loom/types"
- lru "github.com/hashicorp/golang-lru"
- log "github.com/sirupsen/logrus"
- "github.com/vapor/common"
- "github.com/vapor/config"
- "github.com/vapor/protocol"
- "github.com/vapor/protocol/bc"
-)
-
-const (
- inMemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
- inMemorySignatures = 4096 // Number of recent block signatures to keep in memory
- secondsPerYear = 365 * 24 * 3600 // Number of seconds for one year
- checkpointInterval = 360 // About N hours if config.period is N
- module = "dpos"
-)
-
-//delegated-proof-of-stake protocol constants.
-var (
- SignerBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block first year
- defaultEpochLength = uint64(3000000) // Default number of blocks after which vote's period of validity
- defaultBlockPeriod = uint64(3) // Default minimum difference between two consecutive block's timestamps
- defaultMaxSignerCount = uint64(21) //
- defaultMinVoterBalance = new(big.Int).Mul(big.NewInt(10000), big.NewInt(1e+18))
- extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
- extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
- defaultDifficulty = big.NewInt(1) // Default difficulty
- defaultLoopCntRecalculateSigners = uint64(10) // Default loop count to recreate signers from top tally
- minerRewardPerThousand = uint64(618) // Default reward for miner in each block from block reward (618/1000)
- candidateNeedPD = false // is new candidate need Proposal & Declare process
-)
-
-var (
- // errUnknownBlock is returned when the list of signers is requested for a block
- // that is not part of the local blockchain.
- errUnknownBlock = errors.New("unknown block")
-
- // errMissingVanity is returned if a block's extra-data section is shorter than
- // 32 bytes, which is required to store the signer vanity.
- errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
-
- // errMissingSignature is returned if a block's extra-data section doesn't seem
- // to contain a 65 byte secp256k1 signature.
- errMissingSignature = errors.New("extra-data 65 byte suffix signature missing")
-
- // errInvalidMixDigest is returned if a block's mix digest is non-zero.
- errInvalidMixDigest = errors.New("non-zero mix digest")
-
- // errInvalidUncleHash is returned if a block contains an non-empty uncle list.
- errInvalidUncleHash = errors.New("non empty uncle hash")
-
- // ErrInvalidTimestamp is returned if the timestamp of a block is lower than
- // the previous block's timestamp + the minimum block period.
- ErrInvalidTimestamp = errors.New("invalid timestamp")
-
- // errInvalidVotingChain is returned if an authorization list is attempted to
- // be modified via out-of-range or non-contiguous headers.
- errInvalidVotingChain = errors.New("invalid voting chain")
-
- // errUnauthorized is returned if a header is signed by a non-authorized entity.
- errUnauthorized = errors.New("unauthorized")
-
- // errPunishedMissing is returned if a header calculate punished signer is wrong.
- errPunishedMissing = errors.New("punished signer missing")
-
- // errWaitTransactions is returned if an empty block is attempted to be sealed
- // on an instant chain (0 second period). It's important to refuse these as the
- // block reward is zero, so an empty block just bloats the chain... fast.
- errWaitTransactions = errors.New("waiting for transactions")
-
- // errUnclesNotAllowed is returned if uncles exists
- errUnclesNotAllowed = errors.New("uncles not allowed")
-
- // errCreateSignerQueueNotAllowed is returned if called in (block number + 1) % maxSignerCount != 0
- errCreateSignerQueueNotAllowed = errors.New("create signer queue not allowed")
-
- // errInvalidSignerQueue is returned if verify SignerQueue fail
- errInvalidSignerQueue = errors.New("invalid signer queue")
-
- // errSignerQueueEmpty is returned if no signer when calculate
- errSignerQueueEmpty = errors.New("signer queue is empty")
-)
-
-type Dpos struct {
- config *config.DposConfig // Consensus engine configuration parameters
- store protocol.Store // Database to store and retrieve snapshot checkpoints
- recents *lru.ARCCache // Snapshots for recent block to speed up reorgs
- signatures *lru.ARCCache // Signatures of recent blocks to speed up mining
- signer common.Address // Ethereum address of the signing key
- signFn SignerFn // Signer function to authorize hashes with
- signTxFn SignTxFn // Sign transaction function to sign tx
- lock sync.RWMutex // Protects the signer fields
- lcsc uint64 // Last confirmed side chain
-}
-
-// SignerFn is a signer callback function to request a hash to be signed by a backing account.
-type SignerFn func(common.Address, []byte) ([]byte, error)
-
-// SignTxFn is a signTx
-type SignTxFn func(common.Address, *types.Transaction, *big.Int) (*types.Transaction, error)
-
-//
-func ecrecover(header *types.BlockHeader, sigcache *lru.ARCCache) (common.Address, error) {
- return nil, nil
-}
-
-func sigHash(header *types.BlockHeader) (hash bc.Hash) {
- return bc.Hash{}
-}
-
-//
-func New(config *config.DposConfig, store protocol.Store) *Dpos {
- conf := *config
- if conf.Epoch == 0 {
- conf.Epoch = defaultEpochLength
- }
- if conf.Period == 0 {
- conf.Period = defaultBlockPeriod
- }
- if conf.MaxSignerCount == 0 {
- conf.MaxSignerCount = defaultMaxSignerCount
- }
- if conf.MinVoterBalance.Uint64() == 0 {
- conf.MinVoterBalance = defaultMinVoterBalance
- }
-
- // Allocate the snapshot caches and create the engine
- recents, _ := lru.NewARC(inMemorySnapshots)
- signatures, _ := lru.NewARC(inMemorySignatures)
- return &Dpos{
- config: &conf,
- store: store,
- recents: recents,
- signatures: signatures,
- }
-}
-
-// 从BLockHeader中获取到地址
-func (d *Dpos) Author(header *types.BlockHeader) (common.Address, error) {
- return ecrecover(header, d.signatures)
-}
-
-// Prepare implements consensus.Engine, preparing all the consensus fields of the header for running the transactions on top.
-func (d *Dpos) Prepare(c *protocol.Chain, header *bc.BlockHeader) error {
- if d.config.GenesisTimestamp < uint64(time.Now().Unix()) {
- return nil
- }
-
- if header.Height == 1 {
- for {
- delay := time.Unix(int64(d.config.GenesisTimestamp-2), 0).Sub(time.Now())
- if delay <= time.Duration(0) {
- log.WithFields(log.Fields{"module": module, "time": time.Now()}).Info("Ready for seal block")
- break
- } else if delay > time.Duration(d.config.Period)*time.Second {
- delay = time.Duration(d.config.Period) * time.Second
- }
- log.WithFields(log.Fields{"module": module, "delay": time.Duration(time.Unix(int64(d.config.GenesisTimestamp-2), 0).Sub(time.Now()))}).Info("Waiting for seal block")
- select {
- case <-time.After(delay):
- continue
- }
- }
- }
- return nil
-}
-
-func (d *Dpos) Finalize(c *protocol.Chain, header *bc.BlockHeader, txs []*types.Transaction) (*bc.Block, error) {
- height := c.BestBlockHeight()
- fmt.Println(height)
- parent, err := c.GetHeaderByHeight(height - 1)
- if parent == nil {
- return nil, err
- }
- //header.Timestamp
- t := new(big.Int).Add(new(big.Int).SetUint64(parent.Timestamp), new(big.Int).SetUint64(d.config.Period))
- header.Timestamp = t.Uint64()
-
- if header.Timestamp < uint64(time.Now().Unix()) {
- header.Timestamp = uint64(time.Now().Unix())
- }
-
-
- return nil, nil
-}
-
-func (d *Dpos) Seal() {
-
-}
+++ /dev/null
-package dpos
+++ /dev/null
-package dpos
--- /dev/null
+package miner
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/vapor/config"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/vapor/account"
+ "github.com/vapor/common"
+ "github.com/vapor/consensus"
+ engine "github.com/vapor/consensus/consensus"
+ "github.com/vapor/consensus/consensus/dpos"
+ "github.com/vapor/crypto"
+ "github.com/vapor/crypto/ed25519/chainkd"
+ "github.com/vapor/mining"
+ "github.com/vapor/protocol"
+ "github.com/vapor/protocol/bc"
+ "github.com/vapor/protocol/bc/types"
+ "github.com/vapor/protocol/vm/vmutil"
+)
+
+const (
+ maxNonce = ^uint64(0) // 2^64 - 1
+ defaultNumWorkers = 1
+ hashUpdateSecs = 1
+ module = "miner"
+)
+
+var ConsensusEngine engine.Engine
+
+// Miner creates block templates, seals them with the configured consensus
+// engine (dpos), and submits the result to the chain. (The original
+// cpuminer wording about "proof-of-work values" no longer applies here.)
+type Miner struct {
+	sync.Mutex                        // protects started, numWorkers and quit
+	chain          *protocol.Chain    // blockchain being extended
+	accountManager *account.Manager   // account state used when building blocks
+	txPool         *protocol.TxPool   // source of candidate transactions
+	numWorkers     uint64             // desired number of mining worker goroutines
+	started        bool               // whether the worker controller is running
+	discreteMining bool               // NOTE(review): never written in this file — confirm intended use
+	workerWg       sync.WaitGroup     // waits for worker goroutines on shutdown
+	updateNumWorkers chan struct{}    // signals the controller to resize the worker set
+	quit             chan struct{}    // closed by Stop to terminate the controller
+	newBlockCh       chan *bc.Hash    // receives hashes of successfully processed blocks
+	Authoritys       map[string]string // address -> xpub; only filled by commented-out code, currently empty
+	position         uint64            // local signer index; only set by commented-out code
+	engine           engine.Engine     // consensus engine (always *dpos.Dpos here)
+}
+
+// NewMiner builds a dpos miner bound to the given chain, account manager and
+// transaction pool; hashes of mined blocks are announced on newBlockCh.
+// The engine MUST be a *dpos.Dpos: any other engine logs an error and makes
+// NewMiner return nil, so callers must nil-check the result.
+// As a side effect the engine is registered on the chain and in the
+// package-level ConsensusEngine variable.
+func NewMiner(c *protocol.Chain, accountManager *account.Manager, txPool *protocol.TxPool, newBlockCh chan *bc.Hash, engine engine.Engine) *Miner {
+	authoritys := make(map[string]string)
+	var position uint64
+	dpos, ok := engine.(*dpos.Dpos)
+	if !ok {
+		log.Error("Only the dpos engine was allowed")
+		return nil
+	}
+	// Authorize the locally configured coinbase address to sign blocks.
+	dpos.Authorize(config.CommonConfig.Consensus.Dpos.Coinbase)
+	/*
+		for index, xpub := range consensus.ActiveNetParams.SignBlockXPubs {
+			pubHash := crypto.Ripemd160(xpub.PublicKey())
+			address, _ := common.NewPeginAddressWitnessScriptHash(pubHash, &consensus.ActiveNetParams)
+			control, _ := vmutil.P2WPKHProgram([]byte(pubHash))
+			//key := hex.EncodeToString(control)
+			//authoritys[key] = xpub.String()
+			authoritys[address.EncodeAddress()] = xpub.String()
+			if accountManager.IsLocalControlProgram(control) {
+				position = uint64(index)
+				dpos.Authorize(address.EncodeAddress())
+			}
+		}
+	*/
+	//c.SetAuthoritys(authoritys)
+	//c.SetPosition(position)
+	c.SetConsensusEngine(dpos)
+	ConsensusEngine = dpos
+	return &Miner{
+		chain:            c,
+		accountManager:   accountManager,
+		txPool:           txPool,
+		numWorkers:       defaultNumWorkers,
+		updateNumWorkers: make(chan struct{}),
+		newBlockCh:       newBlockCh,
+		Authoritys:       authoritys, // empty until the commented code above is restored
+		position:         position,
+		engine:           dpos,
+	}
+}
+
+// generateProof signs the block's transactions merkle root with the signer
+// key from the active network parameters and assembles the Proof carried in
+// the block header. It fails when no signer is configured, the signer key
+// cannot be parsed, the pegin address cannot be derived, or the control
+// program cannot be built.
+func (m *Miner) generateProof(block types.Block) (types.Proof, error) {
+	var xPrv chainkd.XPrv
+	if consensus.ActiveNetParams.Signer == "" {
+		return types.Proof{}, errors.New("Signer is empty")
+	}
+	// Previously this error was discarded, which would silently sign with a
+	// zero key when the configured signer string is malformed.
+	if err := xPrv.UnmarshalText([]byte(consensus.ActiveNetParams.Signer)); err != nil {
+		return types.Proof{}, err
+	}
+	sign := xPrv.Sign(block.BlockCommitment.TransactionsMerkleRoot.Bytes())
+	pubHash := crypto.Ripemd160(xPrv.XPub().PublicKey())
+
+	address, err := common.NewPeginAddressWitnessScriptHash(pubHash, &consensus.ActiveNetParams)
+	if err != nil {
+		return types.Proof{}, err
+	}
+	control, err := vmutil.P2WPKHProgram([]byte(pubHash))
+	if err != nil {
+		return types.Proof{}, err
+	}
+	return types.Proof{Sign: sign, ControlProgram: control, Address: address.ScriptAddress()}, nil
+}
+
+// generateBlocks is a worker that is controlled by the miningWorkerController.
+// It is self contained in that it creates block templates and attempts to solve
+// them while detecting when it is performing stale work and reacting
+// accordingly by generating a new block template. When a block is solved, it
+// is submitted.
+//
+// It must be run as a goroutine.
+func (m *Miner) generateBlocks(quit chan struct{}) {
+	// Release the wait group unconditionally so a panic in block creation
+	// cannot deadlock miningWorkerController's workerWg.Wait().
+	defer m.workerWg.Done()
+
+	ticker := time.NewTicker(time.Second * hashUpdateSecs)
+	defer ticker.Stop()
+
+	for {
+		// Bail out promptly when the controller closes our quit channel.
+		select {
+		case <-quit:
+			return
+		default:
+		}
+		/*
+			engine, ok := m.engine.(*dpos.Dpos)
+			if !ok {
+				log.Error("Only the dpos engine was allowed")
+				return
+			}
+
+			header := m.chain.BestBlockHeader()
+			isSeal, err := engine.IsSealer(m.chain, header.Hash(), header, uint64(time.Now().Unix()))
+			if err != nil {
+				log.WithFields(log.Fields{"module": module, "error": err}).Error("Determine whether seal is wrong")
+				continue
+			}
+		*/
+		isSeal := true
+		if isSeal {
+			block, err := mining.NewBlockTemplate1(m.chain, m.txPool, m.accountManager, m.engine)
+			if err != nil {
+				log.Errorf("Mining: failed on create NewBlockTemplate: %v", err)
+				if !sleepOrQuit(quit, 3*time.Second) {
+					return
+				}
+				continue
+			}
+			if block == nil {
+				// No template available (e.g. no fee-paying transactions).
+				if !sleepOrQuit(quit, 3*time.Second) {
+					return
+				}
+				continue
+			}
+			block, err = m.engine.Seal(m.chain, block)
+			if err != nil {
+				log.Errorf("Seal, %v", err)
+				continue
+			}
+			m.chain.SetConsensusEngine(m.engine)
+			if isOrphan, err := m.chain.ProcessBlock(block); err == nil {
+				log.WithFields(log.Fields{
+					"height":   block.BlockHeader.Height,
+					"isOrphan": isOrphan,
+					"tx":       len(block.Transactions),
+				}).Info("Miner processed block")
+
+				blockHash := block.Hash()
+				m.newBlockCh <- &blockHash
+			} else {
+				log.WithField("height", block.BlockHeader.Height).Errorf("Miner fail on ProcessBlock, %v", err)
+			}
+		}
+		// Pace the loop; the previous bare time.Sleep could delay shutdown
+		// by up to three seconds because it ignored the quit channel.
+		if !sleepOrQuit(quit, 3*time.Second) {
+			return
+		}
+	}
+}
+
+// sleepOrQuit pauses for d and reports false if quit was closed meanwhile.
+func sleepOrQuit(quit chan struct{}, d time.Duration) bool {
+	select {
+	case <-quit:
+		return false
+	case <-time.After(d):
+		return true
+	}
+}
+
+// miningWorkerController launches the worker goroutines that are used to
+// generate block templates and solve them. It also provides the ability to
+// dynamically adjust the number of running worker goroutines.
+//
+// It must be run as a goroutine.
+func (m *Miner) miningWorkerController() {
+	var runningWorkers []chan struct{}
+
+	// launchWorkers groups common code to launch a specified number of
+	// workers for generating blocks.
+	launchWorkers := func(numWorkers uint64) {
+		for i := uint64(0); i < numWorkers; i++ {
+			quit := make(chan struct{})
+			runningWorkers = append(runningWorkers, quit)
+
+			m.workerWg.Add(1)
+			go m.generateBlocks(quit)
+		}
+	}
+
+	// Launch the current number of workers by default.
+	runningWorkers = make([]chan struct{}, 0, m.numWorkers)
+	launchWorkers(m.numWorkers)
+
+out:
+	for {
+		select {
+		// Update the number of running workers.
+		case <-m.updateNumWorkers:
+			// No change.
+			numRunning := uint64(len(runningWorkers))
+			if m.numWorkers == numRunning {
+				continue
+			}
+
+			// Add new workers.
+			if m.numWorkers > numRunning {
+				launchWorkers(m.numWorkers - numRunning)
+				continue
+			}
+
+			// Signal the most recently created goroutines to exit.
+			// BUG FIX: the previous form (for i := numRunning-1; i >= m.numWorkers; i--)
+			// used an unsigned index, so when m.numWorkers == 0 the index
+			// wrapped past zero and panicked indexing runningWorkers.
+			for numRunning > m.numWorkers {
+				numRunning--
+				close(runningWorkers[numRunning])
+				runningWorkers[numRunning] = nil
+				runningWorkers = runningWorkers[:numRunning]
+			}
+
+		case <-m.quit:
+			for _, quit := range runningWorkers {
+				close(quit)
+			}
+			break out
+		}
+	}
+
+	m.workerWg.Wait()
+}
+
+// Start begins the CPU mining process as well as the speed monitor used to
+// track hashing metrics. Calling this function when the CPU miner has
+// already been started will have no effect.
+//
+// This function is safe for concurrent access.
+func (m *Miner) Start() {
+	m.Lock()
+	defer m.Unlock()
+
+	if m.started {
+		return // already running; starting twice is a no-op
+	}
+
+	m.quit = make(chan struct{})
+	m.started = true
+	go m.miningWorkerController()
+	log.Infof("CPU miner started")
+}
+
+// Stop gracefully stops the mining process by signalling all workers, and the
+// speed monitor to quit. Calling this function when the CPU miner has not
+// already been started will have no effect.
+//
+// This function is safe for concurrent access.
+func (m *Miner) Stop() {
+	m.Lock()
+	defer m.Unlock()
+
+	if !m.started {
+		return // not running; stopping twice is a no-op
+	}
+
+	close(m.quit)
+	m.started = false
+	log.Info("CPU miner stopped")
+}
+
+// IsMining returns whether or not the CPU miner has been started and is
+// therefore currenting mining.
+//
+// This function is safe for concurrent access.
+func (m *Miner) IsMining() bool {
+	m.Lock()
+	defer m.Unlock()
+	return m.started
+}
+
+// SetNumWorkers sets the number of workers to create which solve blocks. Any
+// negative values will cause a default number of workers to be used which is
+// based on the number of processor cores in the system. A value of 0 will
+// cause all CPU mining to be stopped.
+//
+// This function is safe for concurrent access.
+func (m *Miner) SetNumWorkers(numWorkers int32) {
+	if numWorkers == 0 {
+		m.Stop()
+	}
+
+	// Stop performs its own locking, so the mutex is taken only afterwards.
+	m.Lock()
+	defer m.Unlock()
+
+	switch {
+	case numWorkers < 0:
+		// Negative means "use the default worker count".
+		m.numWorkers = defaultNumWorkers
+	default:
+		m.numWorkers = uint64(numWorkers)
+	}
+
+	// Wake the controller so it resizes the running worker set.
+	if m.started {
+		m.updateNumWorkers <- struct{}{}
+	}
+}
+
+// NumWorkers returns the number of workers which are running to solve blocks.
+//
+// This function is safe for concurrent access.
+func (m *Miner) NumWorkers() int32 {
+	m.Lock()
+	defer m.Unlock()
+	return int32(m.numWorkers)
+}
"strconv"
"time"
+ "github.com/vapor/common"
+
log "github.com/sirupsen/logrus"
"github.com/vapor/account"
"github.com/vapor/blockchain/txbuilder"
+ "github.com/vapor/config"
"github.com/vapor/consensus"
+ engine "github.com/vapor/consensus/consensus"
+ "github.com/vapor/crypto/ed25519/chainkd"
"github.com/vapor/errors"
"github.com/vapor/protocol"
"github.com/vapor/protocol/bc"
arbitrary := append([]byte{0x00}, []byte(strconv.FormatUint(blockHeight, 10))...)
var script []byte
- if accountManager == nil {
- script, err = vmutil.DefaultCoinbaseProgram()
- } else {
- script, err = accountManager.GetCoinbaseControlProgram()
- arbitrary = append(arbitrary, accountManager.GetCoinbaseArbitrary()...)
- }
+ address, _ := common.DecodeAddress(config.CommonConfig.Consensus.Dpos.Coinbase, &consensus.ActiveNetParams)
+ redeemContract := address.ScriptAddress()
+ script, _ = vmutil.P2WPKHProgram(redeemContract)
+ /*
+ if accountManager == nil {
+ script, err = vmutil.DefaultCoinbaseProgram()
+ } else {
+
+ script, err = accountManager.GetCoinbaseControlProgram()
+ arbitrary = append(arbitrary, accountManager.GetCoinbaseArbitrary()...)
+ }
+ */
if err != nil {
return nil, err
}
preBlockHash := preBlockHeader.Hash()
nextBlockHeight := preBlockHeader.Height + 1
- b = &types.Block{
- BlockHeader: types.BlockHeader{
- Version: 1,
- Height: nextBlockHeight,
- PreviousBlockHash: preBlockHash,
- Timestamp: uint64(time.Now().Unix()),
- BlockCommitment: types.BlockCommitment{},
- },
+ header := types.BlockHeader{
+ Version: 1,
+ Height: nextBlockHeight,
+ PreviousBlockHash: preBlockHash,
+ Timestamp: uint64(time.Now().Unix()),
+ BlockCommitment: types.BlockCommitment{},
}
+
+ b = &types.Block{BlockHeader: header}
bcBlock := &bc.Block{BlockHeader: &bc.BlockHeader{Height: nextBlockHeight}}
b.Transactions = []*types.Tx{nil}
if txFee == 0 {
return nil, err
}
+
+ // creater coinbase transaction
+ b.Transactions[0], err = createCoinbaseTx(accountManager, txFee, nextBlockHeight)
+ if err != nil {
+ return nil, errors.Wrap(err, "fail on createCoinbaseTx")
+ }
+ txEntries[0] = b.Transactions[0].Tx
+
+ b.BlockHeader.BlockCommitment.TransactionsMerkleRoot, err = types.TxMerkleRoot(txEntries)
+ if err != nil {
+ return nil, err
+ }
+
+ b.BlockHeader.BlockCommitment.TransactionStatusHash, err = types.TxStatusMerkleRoot(txStatus.VerifyStatus)
+ return b, err
+}
+
+// NewBlockTemplate returns a new block template that is ready to be solved
+func NewBlockTemplate1(c *protocol.Chain, txPool *protocol.TxPool, accountManager *account.Manager, engine engine.Engine) (b *types.Block, err error) {
+ view := state.NewUtxoViewpoint()
+ txStatus := bc.NewTransactionStatus()
+ if err := txStatus.SetStatus(0, false); err != nil {
+ return nil, err
+ }
+ txEntries := []*bc.Tx{nil}
+ gasUsed := uint64(0)
+ txFee := uint64(0)
+
+ // get preblock info for generate next block
+ preBlockHeader := c.BestBlockHeader()
+ preBlockHash := preBlockHeader.Hash()
+ nextBlockHeight := preBlockHeader.Height + 1
+
+ var xPrv chainkd.XPrv
+ if config.CommonConfig.Consensus.Dpos.XPrv == "" {
+ return nil, errors.New("Signer is empty")
+ }
+ xPrv.UnmarshalText([]byte(config.CommonConfig.Consensus.Dpos.XPrv))
+ xpub, _ := xPrv.XPub().MarshalText()
+
+ header := types.BlockHeader{
+ Version: 1,
+ Height: nextBlockHeight,
+ PreviousBlockHash: preBlockHash,
+ Timestamp: uint64(time.Now().Unix()),
+ BlockCommitment: types.BlockCommitment{},
+ Coinbase: xpub,
+ //Extra: make([]byte, 32+65),
+ }
+
+ if err := engine.Prepare(c, &header); err != nil {
+ log.Error("Failed to prepare header for mining", "err", err)
+ return nil, err
+ }
+
+ b = &types.Block{}
+ bcBlock := &bc.Block{BlockHeader: &bc.BlockHeader{Height: nextBlockHeight}}
+ b.Transactions = []*types.Tx{nil}
+
+ txs := txPool.GetTransactions()
+ sort.Sort(byTime(txs))
+ for _, txDesc := range txs {
+ tx := txDesc.Tx.Tx
+ gasOnlyTx := false
+
+ if err := c.GetTransactionsUtxo(view, []*bc.Tx{tx}); err != nil {
+ blkGenSkipTxForErr(txPool, &tx.ID, err)
+ continue
+ }
+ gasStatus, err := validation.ValidateTx(tx, bcBlock)
+ if err != nil {
+ if !gasStatus.GasValid {
+ blkGenSkipTxForErr(txPool, &tx.ID, err)
+ continue
+ }
+ gasOnlyTx = true
+ }
+
+ if gasUsed+uint64(gasStatus.GasUsed) > consensus.MaxBlockGas {
+ break
+ }
+
+ if err := view.ApplyTransaction(bcBlock, tx, gasOnlyTx); err != nil {
+ blkGenSkipTxForErr(txPool, &tx.ID, err)
+ continue
+ }
+
+ if err := txStatus.SetStatus(len(b.Transactions), gasOnlyTx); err != nil {
+ return nil, err
+ }
+
+ b.Transactions = append(b.Transactions, txDesc.Tx)
+ txEntries = append(txEntries, tx)
+ gasUsed += uint64(gasStatus.GasUsed)
+ txFee += txDesc.Fee
+ if gasUsed == consensus.MaxBlockGas {
+ break
+ }
+ }
+
+ if txFee == 0 {
+ return nil, nil
+ }
+
+ if err := engine.Finalize(c, &header, txEntries[1:]); err != nil {
+ return nil, err
+ }
+
+ b.BlockHeader = header
// creater coinbase transaction
b.Transactions[0], err = createCoinbaseTx(accountManager, txFee, nextBlockHeight)
if err != nil {
log "github.com/sirupsen/logrus"
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
+ "github.com/vapor/chain"
"github.com/vapor/protocol/bc"
)
// blockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type blockFetcher struct {
- chain Chain
+ chain chain.Chain
peers *peerSet
newBlockCh chan *blockMsg
}
//NewBlockFetcher creates a block fetcher to retrieve blocks of the new mined.
-func newBlockFetcher(chain Chain, peers *peerSet) *blockFetcher {
+func newBlockFetcher(chain chain.Chain, peers *peerSet) *blockFetcher {
f := &blockFetcher{
chain: chain,
peers: peers,
log "github.com/sirupsen/logrus"
+ "github.com/vapor/chain"
"github.com/vapor/consensus"
"github.com/vapor/errors"
"github.com/vapor/mining/tensority"
}
type blockKeeper struct {
- chain Chain
+ chain chain.Chain
peers *peerSet
syncPeer *peer
headerList *list.List
}
-func newBlockKeeper(chain Chain, peers *peerSet) *blockKeeper {
+func newBlockKeeper(chain chain.Chain, peers *peerSet) *blockKeeper {
bk := &blockKeeper{
chain: chain,
peers: peers,
"github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
+ "github.com/vapor/chain"
cfg "github.com/vapor/config"
"github.com/vapor/consensus"
"github.com/vapor/p2p"
maxFilterAddressCount = 1000
)
-// Chain is the interface for Bytom core
-type Chain interface {
- BestBlockHeader() *types.BlockHeader
- BestBlockHeight() uint64
- CalcNextSeed(*bc.Hash) (*bc.Hash, error)
- GetBlockByHash(*bc.Hash) (*types.Block, error)
- GetBlockByHeight(uint64) (*types.Block, error)
- GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error)
- GetHeaderByHeight(uint64) (*types.BlockHeader, error)
- GetTransactionStatus(*bc.Hash) (*bc.TransactionStatus, error)
- InMainChain(bc.Hash) bool
- ProcessBlock(*types.Block) (bool, error)
- ValidateTx(*types.Tx) (bool, error)
-}
-
//SyncManager Sync Manager is responsible for the business layer information synchronization
type SyncManager struct {
sw *p2p.Switch
genesisHash bc.Hash
privKey crypto.PrivKeyEd25519 // local node's p2p key
- chain Chain
+ chain chain.Chain
txPool *core.TxPool
blockFetcher *blockFetcher
blockKeeper *blockKeeper
}
//NewSyncManager create a sync manager
-func NewSyncManager(config *cfg.Config, chain Chain, txPool *core.TxPool, newBlockCh chan *bc.Hash) (*SyncManager, error) {
+func NewSyncManager(config *cfg.Config, chain chain.Chain, txPool *core.TxPool, newBlockCh chan *bc.Hash) (*SyncManager, error) {
genesisHeader, err := chain.GetHeaderByHeight(0)
if err != nil {
return nil, err
"github.com/vapor/asset"
"github.com/vapor/blockchain/pseudohsm"
"github.com/vapor/blockchain/txfeed"
+ "github.com/vapor/common"
cfg "github.com/vapor/config"
"github.com/vapor/consensus"
+ engine "github.com/vapor/consensus/consensus"
+ "github.com/vapor/consensus/consensus/dpos"
"github.com/vapor/crypto/ed25519/chainkd"
"github.com/vapor/database/leveldb"
"github.com/vapor/env"
- "github.com/vapor/mining/cpuminer"
+ "github.com/vapor/mining/miner"
"github.com/vapor/mining/miningpool"
"github.com/vapor/net/websocket"
"github.com/vapor/netsync"
maxNewBlockChSize = 1024
)
+var consensusEngine engine.Engine
+
type Node struct {
cmn.BaseService
api *api.API
chain *protocol.Chain
txfeed *txfeed.Tracker
- cpuMiner *cpuminer.CPUMiner
- miningPool *miningpool.MiningPool
- miningEnable bool
+ //cpuMiner *cpuminer.CPUMiner
+ miner *miner.Miner
+
+ miningPool *miningpool.MiningPool
+ miningEnable bool
newBlockCh chan *bc.Hash
}
}
initLogFile(config)
initActiveNetParams(config)
+ initConsensusConfig(config)
initCommonConfig(config)
+
util.MainchainConfig = config.MainChain
util.ValidatePegin = config.ValidatePegin
// Get store
}
if !config.Wallet.Disable {
+ address, err := common.DecodeAddress(config.Consensus.Dpos.Coinbase, &consensus.ActiveNetParams)
+ if err != nil {
+ cmn.Exit(cmn.Fmt("DecodeAddress: %v", err))
+ }
walletDB := dbm.NewDB("wallet", config.DBBackend, config.DBDir())
accounts = account.NewManager(walletDB, chain)
assets = asset.NewRegistry(walletDB, chain)
- wallet, err = w.NewWallet(walletDB, accounts, assets, hsm, chain)
+ wallet, err = w.NewWallet(walletDB, accounts, assets, hsm, chain, address)
if err != nil {
log.WithField("error", err).Error("init NewWallet")
}
notificationMgr: notificationMgr,
}
- node.cpuMiner = cpuminer.NewCPUMiner(chain, accounts, txPool, newBlockCh)
+ //node.cpuMiner = cpuminer.NewCPUMiner(chain, accounts, txPool, newBlockCh)
+ consensusEngine = createConsensusEngine(config, store)
+ node.miner = miner.NewMiner(chain, accounts, txPool, newBlockCh, consensusEngine)
node.miningPool = miningpool.NewMiningPool(chain, accounts, txPool, newBlockCh)
node.BaseService = *cmn.NewBaseService(nil, "Node", node)
}
func (n *Node) initAndstartApiServer() {
- n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.cpuMiner, n.miningPool, n.chain, n.config, n.accessTokens, n.newBlockCh, n.notificationMgr)
+ n.api = api.NewAPI(n.syncManager, n.wallet, n.txfeed, n.miner, n.miningPool, n.chain, n.config, n.accessTokens, n.newBlockCh, n.notificationMgr)
listenAddr := env.String("LISTEN", n.config.ApiAddress)
env.Parse()
n.miningEnable = false
log.Error(err)
} else {
- n.cpuMiner.Start()
+ //n.cpuMiner.Start()
+ n.miner.Start()
}
}
if !n.config.VaultMode {
n.notificationMgr.WaitForShutdown()
n.BaseService.OnStop()
if n.miningEnable {
- n.cpuMiner.Stop()
+ n.miner.Stop()
}
if !n.config.VaultMode {
n.syncManager.Stop()
json.Unmarshal(tmp, &blockHeader)
hash := blockHeader.BlockHeader.Hash()
if strings.Compare(consensus.ActiveNetParams.ParentGenesisBlockHash, hash.String()) != 0 {
- log.Error("Invalid parent genesis block hash response via RPC. Contacting wrong parent daemon?")
+ log.Error("Invalid parent genesis block hash response via RPC. Contacting wrong parent daemon?", consensus.ActiveNetParams.ParentGenesisBlockHash, hash.String())
return false
}
break
return true
}
+
+// initConsensusConfig loads the optional consensus configuration file into
+// config and resolves the dpos self-vote signer addresses into
+// config.Consensus.Dpos.Signers. Any failure exits the process.
+func initConsensusConfig(config *cfg.Config) {
+	if config.ConsensusConfigFile == "" {
+		// No consensus file configured: keep the default (poa) settings.
+		return
+	}
+
+	file, err := os.Open(config.ConsensusConfigFile)
+	if err != nil {
+		cmn.Exit(cmn.Fmt("Failed to read consensus file: %v", err))
+	}
+	defer file.Close()
+
+	if err := json.NewDecoder(file).Decode(config); err != nil {
+		cmn.Exit(cmn.Fmt("invalid consensus file: %v", err))
+	}
+
+	for _, v := range config.Consensus.Dpos.SelfVoteSigners {
+		address, err := common.DecodeAddress(v, &consensus.ActiveNetParams)
+		if err != nil {
+			cmn.Exit(cmn.Fmt("Address resolution failed: %v", err))
+		}
+		config.Consensus.Dpos.Signers = append(config.Consensus.Dpos.Signers, address)
+	}
+}
+
+// createConsensusEngine builds the consensus engine selected by the node
+// configuration; it returns nil when no dpos section is configured.
+func createConsensusEngine(config *cfg.Config, store protocol.Store) engine.Engine {
+	// Idiom fix: drop the redundant else-after-return branch.
+	if config.Consensus.Dpos != nil {
+		return dpos.New(config.Consensus.Dpos, store)
+	}
+	return nil
+}
+
+// GetConsensusEngine returns the package-level consensus engine created by
+// createConsensusEngine during node construction; nil before that happens.
+func GetConsensusEngine() engine.Engine {
+	return consensusEngine
+}
Issuance
Spend
Claim
+ Dpos
*/
package bc
type Proof struct {
Sign []byte `protobuf:"bytes,1,opt,name=sign,proto3" json:"sign,omitempty"`
ControlProgram []byte `protobuf:"bytes,2,opt,name=controlProgram,proto3" json:"controlProgram,omitempty"`
+ Address []byte `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
}
func (m *Proof) Reset() { *m = Proof{} }
return nil
}
+func (m *Proof) GetAddress() []byte {
+ if m != nil {
+ return m.Address
+ }
+ return nil
+}
+
type BytomBlockHeader struct {
Version uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
Height uint64 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
Nonce uint64 `protobuf:"varint,7,opt,name=nonce" json:"nonce,omitempty"`
Bits uint64 `protobuf:"varint,8,opt,name=bits" json:"bits,omitempty"`
TransactionStatus *TransactionStatus `protobuf:"bytes,9,opt,name=transaction_status,json=transactionStatus" json:"transaction_status,omitempty"`
- Extra []byte `protobuf:"bytes,10,opt,name=extra,proto3" json:"extra,omitempty"`
}
func (m *BytomBlockHeader) Reset() { *m = BytomBlockHeader{} }
return nil
}
-func (m *BytomBlockHeader) GetExtra() []byte {
- if m != nil {
- return m.Extra
- }
- return nil
-}
-
type BlockHeader struct {
Version uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
Height uint64 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
Bits uint64 `protobuf:"varint,8,opt,name=bits" json:"bits,omitempty"`
TransactionStatus *TransactionStatus `protobuf:"bytes,9,opt,name=transaction_status,json=transactionStatus" json:"transaction_status,omitempty"`
Proof *Proof `protobuf:"bytes,10,opt,name=Proof" json:"Proof,omitempty"`
+ Extra []byte `protobuf:"bytes,11,opt,name=extra,proto3" json:"extra,omitempty"`
+ Coinbase []byte `protobuf:"bytes,12,opt,name=coinbase,proto3" json:"coinbase,omitempty"`
}
func (m *BlockHeader) Reset() { *m = BlockHeader{} }
return nil
}
+func (m *BlockHeader) GetExtra() []byte {
+ if m != nil {
+ return m.Extra
+ }
+ return nil
+}
+
+func (m *BlockHeader) GetCoinbase() []byte {
+ if m != nil {
+ return m.Coinbase
+ }
+ return nil
+}
+
type TxHeader struct {
Version uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
SerializedSize uint64 `protobuf:"varint,2,opt,name=serialized_size,json=serializedSize" json:"serialized_size,omitempty"`
return nil
}
+type Dpos struct {
+ SpentOutputId *Hash `protobuf:"bytes,1,opt,name=spent_output_id,json=spentOutputId" json:"spent_output_id,omitempty"`
+ WitnessDestination *ValueDestination `protobuf:"bytes,2,opt,name=witness_destination,json=witnessDestination" json:"witness_destination,omitempty"`
+ WitnessArguments [][]byte `protobuf:"bytes,3,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+ Ordinal uint64 `protobuf:"varint,4,opt,name=ordinal" json:"ordinal,omitempty"`
+ Type uint32 `protobuf:"varint,5,opt,name=type" json:"type,omitempty"`
+ From string `protobuf:"bytes,6,opt,name=from" json:"from,omitempty"`
+ To string `protobuf:"bytes,7,opt,name=to" json:"to,omitempty"`
+ Stake uint64 `protobuf:"varint,8,opt,name=stake" json:"stake,omitempty"`
+ Data string `protobuf:"bytes,9,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *Dpos) Reset() { *m = Dpos{} }
+func (m *Dpos) String() string { return proto.CompactTextString(m) }
+func (*Dpos) ProtoMessage() {}
+func (*Dpos) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *Dpos) GetSpentOutputId() *Hash {
+ if m != nil {
+ return m.SpentOutputId
+ }
+ return nil
+}
+
+func (m *Dpos) GetWitnessDestination() *ValueDestination {
+ if m != nil {
+ return m.WitnessDestination
+ }
+ return nil
+}
+
+func (m *Dpos) GetWitnessArguments() [][]byte {
+ if m != nil {
+ return m.WitnessArguments
+ }
+ return nil
+}
+
+func (m *Dpos) GetOrdinal() uint64 {
+ if m != nil {
+ return m.Ordinal
+ }
+ return 0
+}
+
+func (m *Dpos) GetType() uint32 {
+ if m != nil {
+ return m.Type
+ }
+ return 0
+}
+
+func (m *Dpos) GetFrom() string {
+ if m != nil {
+ return m.From
+ }
+ return ""
+}
+
+func (m *Dpos) GetTo() string {
+ if m != nil {
+ return m.To
+ }
+ return ""
+}
+
+func (m *Dpos) GetStake() uint64 {
+ if m != nil {
+ return m.Stake
+ }
+ return 0
+}
+
+func (m *Dpos) GetData() string {
+ if m != nil {
+ return m.Data
+ }
+ return ""
+}
+
func init() {
proto.RegisterType((*Hash)(nil), "bc.Hash")
proto.RegisterType((*Program)(nil), "bc.Program")
proto.RegisterType((*Issuance)(nil), "bc.Issuance")
proto.RegisterType((*Spend)(nil), "bc.Spend")
proto.RegisterType((*Claim)(nil), "bc.Claim")
+ proto.RegisterType((*Dpos)(nil), "bc.Dpos")
}
func init() { proto.RegisterFile("bc.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 1011 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcd, 0x6e, 0x1b, 0x37,
- 0x10, 0x86, 0xb4, 0xab, 0xbf, 0x91, 0x63, 0xd9, 0xb4, 0x93, 0x2e, 0x82, 0x14, 0x09, 0x16, 0x48,
- 0xdc, 0xa2, 0x80, 0x61, 0xcb, 0x69, 0x7b, 0xe9, 0xa1, 0x8e, 0xdd, 0x34, 0x3a, 0x18, 0x31, 0x68,
- 0xc3, 0xd7, 0x05, 0xb5, 0x4b, 0xc9, 0x44, 0x57, 0x4b, 0x95, 0xe4, 0xaa, 0xb6, 0x6f, 0x7d, 0x88,
- 0xa2, 0xa7, 0x3e, 0x47, 0x1f, 0xa1, 0xa7, 0x3e, 0x49, 0x5f, 0xa2, 0xe0, 0x2c, 0x57, 0x5a, 0xc9,
- 0xce, 0x1f, 0x8a, 0xa2, 0x28, 0x90, 0xdb, 0xce, 0x70, 0xf8, 0xcd, 0xcc, 0x37, 0xc3, 0x21, 0x17,
- 0xda, 0xc3, 0x78, 0x77, 0xaa, 0xa4, 0x91, 0xa4, 0x3e, 0x8c, 0xc3, 0x97, 0xe0, 0xbf, 0x62, 0xfa,
- 0x92, 0xac, 0x43, 0x7d, 0xb6, 0x17, 0xd4, 0x9e, 0xd4, 0x3e, 0x6b, 0xd2, 0xfa, 0x6c, 0x0f, 0xe5,
- 0xfd, 0xa0, 0xee, 0xe4, 0x7d, 0x94, 0xfb, 0x81, 0xe7, 0xe4, 0x3e, 0xca, 0x07, 0x81, 0xef, 0xe4,
- 0x83, 0xf0, 0x1b, 0x68, 0x9d, 0x2a, 0x39, 0x56, 0x6c, 0x42, 0x3e, 0x05, 0x98, 0x4d, 0xa2, 0x19,
- 0x57, 0x5a, 0xc8, 0x0c, 0x21, 0x7d, 0xda, 0x99, 0x4d, 0x2e, 0x0a, 0x05, 0x21, 0xe0, 0xc7, 0x32,
- 0xe1, 0x88, 0xbd, 0x46, 0xf1, 0x3b, 0x1c, 0x40, 0xeb, 0x50, 0x6b, 0x6e, 0x06, 0xc7, 0xff, 0x38,
- 0x90, 0x13, 0xe8, 0x22, 0xd4, 0xe1, 0x44, 0xe6, 0x99, 0x21, 0xcf, 0xa0, 0xcd, 0xac, 0x18, 0x89,
- 0x04, 0x41, 0xbb, 0xfd, 0xee, 0xee, 0x30, 0xde, 0x75, 0xde, 0x68, 0x0b, 0x17, 0x07, 0x09, 0x79,
- 0x00, 0x4d, 0x86, 0x3b, 0xd0, 0x95, 0x4f, 0x9d, 0x14, 0x8e, 0xa1, 0x87, 0xb6, 0xc7, 0x7c, 0x24,
- 0x32, 0x61, 0x6c, 0x02, 0x5f, 0xc1, 0x86, 0xd0, 0x3a, 0x67, 0x59, 0xcc, 0xa3, 0x69, 0x91, 0x73,
- 0x15, 0xda, 0xd1, 0x40, 0x7b, 0xa5, 0x51, 0xc9, 0xcb, 0x23, 0xf0, 0x13, 0x66, 0x18, 0x3a, 0xe8,
- 0xf6, 0xdb, 0xd6, 0xd6, 0x52, 0x4f, 0x51, 0x1b, 0xa6, 0xd0, 0xbd, 0x60, 0x69, 0xce, 0xcf, 0x64,
- 0xae, 0x62, 0x4e, 0x1e, 0x82, 0xa7, 0xf8, 0xc8, 0xe1, 0x2e, 0x6c, 0xad, 0x92, 0x3c, 0x85, 0xc6,
- 0xcc, 0x9a, 0x3a, 0xa4, 0xde, 0x3c, 0xa1, 0x22, 0x67, 0x5a, 0xac, 0x92, 0x87, 0xd0, 0x9e, 0x4a,
- 0x8d, 0x31, 0x23, 0x5f, 0x3e, 0x9d, 0xcb, 0xe1, 0x8f, 0xb0, 0x81, 0xde, 0x8e, 0xb9, 0x36, 0x22,
- 0x63, 0x98, 0xd7, 0xbf, 0xec, 0xf2, 0x08, 0x1a, 0xa7, 0x4a, 0xca, 0x91, 0x6d, 0x00, 0x2d, 0xc6,
- 0x45, 0x67, 0xac, 0x51, 0xfc, 0x26, 0xcf, 0x60, 0x3d, 0x96, 0x99, 0x51, 0x32, 0x75, 0x6c, 0xb9,
- 0xf6, 0x58, 0xd1, 0x86, 0xbf, 0x7a, 0xb0, 0xf1, 0xe2, 0xda, 0xc8, 0xc9, 0x8b, 0x54, 0xc6, 0x3f,
- 0xbc, 0xe2, 0x2c, 0xe1, 0x8a, 0x04, 0xd0, 0x5a, 0xee, 0xb6, 0x52, 0xb4, 0x55, 0xbd, 0xe4, 0x62,
- 0x7c, 0x39, 0xaf, 0x6a, 0x21, 0x91, 0xe7, 0xb0, 0x39, 0x55, 0x7c, 0x26, 0x64, 0xae, 0xa3, 0xa1,
- 0x45, 0xb2, 0xed, 0xe1, 0xad, 0x24, 0xde, 0x2b, 0x4d, 0xd0, 0xd7, 0x20, 0x21, 0x8f, 0xa0, 0x63,
- 0xc4, 0x84, 0x6b, 0xc3, 0x26, 0x53, 0xec, 0x38, 0x9f, 0x2e, 0x14, 0xe4, 0x4b, 0xd8, 0x34, 0x8a,
- 0x65, 0x9a, 0xc5, 0x36, 0x5d, 0x1d, 0x29, 0x29, 0x4d, 0xd0, 0x58, 0xc1, 0xdc, 0xa8, 0x9a, 0x50,
- 0x29, 0x0d, 0xf9, 0x16, 0x3e, 0xa9, 0xe8, 0x22, 0x6d, 0x98, 0xc9, 0x75, 0x74, 0xc9, 0xf4, 0x65,
- 0xd0, 0x5c, 0xd9, 0x7c, 0xbf, 0x62, 0x78, 0x86, 0x76, 0x78, 0x74, 0xb7, 0xa1, 0x91, 0xc9, 0x2c,
- 0xe6, 0x41, 0x0b, 0x43, 0x2a, 0x04, 0xcb, 0xf2, 0x50, 0x18, 0x1d, 0xb4, 0x51, 0x89, 0xdf, 0xe4,
- 0x18, 0xc8, 0x6d, 0x5f, 0x41, 0x07, 0xdd, 0xdc, 0xb7, 0x6e, 0xce, 0x57, 0x1d, 0xd0, 0xcd, 0x5b,
- 0x3e, 0xad, 0x3f, 0x7e, 0x65, 0x14, 0x0b, 0x00, 0x4b, 0x54, 0x08, 0xe1, 0x6f, 0x1e, 0x74, 0x3f,
- 0x16, 0xe5, 0x3f, 0x2b, 0xca, 0x63, 0x77, 0xba, 0xb0, 0x28, 0xdd, 0x7e, 0xc7, 0x4d, 0x22, 0x39,
- 0xa2, 0x85, 0x3e, 0xfc, 0xa5, 0x06, 0xed, 0xf3, 0xab, 0x77, 0x16, 0x67, 0x07, 0x7a, 0x9a, 0x2b,
- 0xc1, 0x52, 0x71, 0xc3, 0x93, 0x48, 0x8b, 0x1b, 0xee, 0xaa, 0xb4, 0xbe, 0x50, 0x9f, 0x89, 0x1b,
- 0x6e, 0xa7, 0xbc, 0xa5, 0x39, 0x52, 0x2c, 0x1b, 0x73, 0x77, 0xd8, 0x91, 0x78, 0x6a, 0x15, 0x64,
- 0x07, 0x40, 0x71, 0x9d, 0xa7, 0x76, 0xf0, 0xea, 0xc0, 0x7f, 0xe2, 0x2d, 0x91, 0xd6, 0x29, 0xd6,
- 0x06, 0x89, 0x0e, 0xf7, 0x61, 0xfd, 0xfc, 0xea, 0x82, 0x2b, 0x31, 0xba, 0xa6, 0xa8, 0x24, 0x8f,
- 0xa1, 0xeb, 0x08, 0x1f, 0x31, 0x91, 0x62, 0x80, 0x6d, 0x0a, 0x85, 0xea, 0x25, 0x13, 0x69, 0x38,
- 0x82, 0xcd, 0x5b, 0x9c, 0xbc, 0x25, 0xa5, 0xaf, 0xe1, 0xde, 0x0c, 0xf1, 0x4b, 0x6e, 0xeb, 0x18,
- 0x0d, 0x41, 0x6e, 0x97, 0x5c, 0xd3, 0xb5, 0xc2, 0xb0, 0x80, 0x0c, 0xff, 0xac, 0x81, 0x77, 0x92,
- 0x5f, 0x91, 0xcf, 0xa1, 0xa5, 0x71, 0x2a, 0xeb, 0xa0, 0x86, 0x5b, 0x71, 0xfc, 0x55, 0xa6, 0x35,
- 0x2d, 0xd7, 0xc9, 0x53, 0x68, 0x4d, 0x2b, 0x03, 0x6c, 0xe5, 0x4a, 0x28, 0xd7, 0xc8, 0xf7, 0xb0,
- 0xfd, 0x93, 0x30, 0x19, 0xd7, 0x3a, 0x4a, 0x16, 0x13, 0x58, 0x07, 0x1e, 0xc2, 0x6f, 0xcf, 0xe1,
- 0x2b, 0xe3, 0x99, 0x6e, 0xb9, 0x1d, 0x15, 0x9d, 0x26, 0x5f, 0xc0, 0x66, 0x09, 0xc4, 0xd4, 0x38,
- 0x9f, 0xf0, 0xcc, 0x14, 0x6c, 0xaf, 0xd1, 0x0d, 0xb7, 0x70, 0x58, 0xea, 0x43, 0x09, 0xed, 0x23,
- 0x29, 0xb2, 0x21, 0xd3, 0x9c, 0x7c, 0x07, 0x5b, 0x77, 0x44, 0xe0, 0x86, 0xff, 0xdd, 0x01, 0x90,
- 0xdb, 0x01, 0xd8, 0xd3, 0xc7, 0xd4, 0x50, 0x18, 0xc5, 0xd4, 0xb5, 0x1b, 0xd9, 0x0b, 0x45, 0xf8,
- 0x73, 0x0d, 0x9a, 0xaf, 0x73, 0x33, 0xcd, 0x0d, 0xd9, 0x81, 0x66, 0xc1, 0x91, 0x73, 0x71, 0x8b,
- 0x42, 0xb7, 0x4c, 0x9e, 0x43, 0xcf, 0xcd, 0xfc, 0xe8, 0x2d, 0x4c, 0xae, 0xdc, 0x0b, 0xb6, 0xfa,
- 0x52, 0x25, 0x22, 0x63, 0xa9, 0x6b, 0xc5, 0x52, 0x0c, 0x5f, 0x03, 0x50, 0x6e, 0x84, 0xe2, 0x96,
- 0x83, 0xf7, 0x0f, 0xa3, 0x02, 0x58, 0x5f, 0x06, 0xfc, 0xbd, 0x0e, 0xed, 0x81, 0xbb, 0xda, 0x6d,
- 0x9b, 0xe3, 0xc9, 0x2e, 0x66, 0xc3, 0xea, 0xd5, 0xd9, 0xc1, 0x35, 0x9c, 0x07, 0xef, 0x79, 0x81,
- 0xbe, 0xa1, 0x2c, 0xde, 0x07, 0x96, 0xe5, 0x04, 0x82, 0x79, 0x5b, 0xe0, 0xeb, 0x27, 0x99, 0x3f,
- 0x5f, 0x70, 0x46, 0x76, 0xfb, 0x5b, 0xf3, 0x00, 0x16, 0x2f, 0x1b, 0xfa, 0xa0, 0x6c, 0x99, 0x95,
- 0x17, 0xcf, 0x9d, 0x5d, 0xd6, 0xb8, 0xbb, 0xcb, 0xaa, 0xcc, 0x35, 0x97, 0x99, 0xfb, 0xa3, 0x06,
- 0x8d, 0xb3, 0x29, 0xcf, 0x12, 0xb2, 0x07, 0x3d, 0x3d, 0xe5, 0x99, 0x89, 0x24, 0x76, 0xc7, 0xe2,
- 0x71, 0xb6, 0xe0, 0xee, 0x1e, 0x1a, 0x14, 0xdd, 0x33, 0x48, 0xde, 0x44, 0x4c, 0xfd, 0x03, 0x89,
- 0xb9, 0x33, 0x13, 0xef, 0xdd, 0x99, 0xf8, 0xcb, 0x99, 0xfc, 0x55, 0x83, 0xc6, 0x51, 0xca, 0xc4,
- 0xe4, 0xff, 0x9e, 0x09, 0x09, 0x61, 0xed, 0x94, 0x8f, 0x45, 0xe6, 0xb6, 0xb8, 0xaa, 0x2e, 0xe9,
- 0x86, 0x4d, 0xfc, 0x5d, 0x38, 0xf8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x25, 0x3b, 0x75, 0x3a, 0x3a,
- 0x0c, 0x00, 0x00,
+ // 1091 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcd, 0x6e, 0xdb, 0x46,
+ 0x10, 0x86, 0x24, 0xea, 0x6f, 0x24, 0x5b, 0xf6, 0xda, 0x49, 0x89, 0x20, 0x45, 0x02, 0x02, 0x89,
+ 0x5b, 0x14, 0x30, 0xfc, 0x93, 0xb6, 0x97, 0x1e, 0xea, 0xc4, 0x4d, 0xa3, 0x83, 0x11, 0x63, 0x6d,
+ 0xf8, 0x56, 0x10, 0x2b, 0x72, 0x25, 0x2f, 0x22, 0x71, 0xd9, 0xdd, 0xa5, 0x6a, 0xfb, 0x96, 0x87,
+ 0xe8, 0x3b, 0xf4, 0x0d, 0xfa, 0x08, 0x3d, 0x15, 0x7d, 0x90, 0xbe, 0x44, 0xb1, 0xc3, 0xa5, 0x44,
+ 0xc9, 0xca, 0x1f, 0x8a, 0xa2, 0x08, 0x90, 0x1b, 0xe7, 0x67, 0xe7, 0xe7, 0x9b, 0xd9, 0x99, 0x25,
+ 0xb4, 0x06, 0xd1, 0x6e, 0xaa, 0xa4, 0x91, 0xa4, 0x3a, 0x88, 0x82, 0xe7, 0xe0, 0xbd, 0x60, 0xfa,
+ 0x92, 0xac, 0x43, 0x75, 0xba, 0xe7, 0x57, 0x1e, 0x56, 0xbe, 0x68, 0xd0, 0xea, 0x74, 0x0f, 0xe9,
+ 0x7d, 0xbf, 0xea, 0xe8, 0x7d, 0xa4, 0x0f, 0xfc, 0x9a, 0xa3, 0x0f, 0x90, 0x3e, 0xf4, 0x3d, 0x47,
+ 0x1f, 0x06, 0xdf, 0x41, 0xf3, 0x54, 0xc9, 0x91, 0x62, 0x13, 0xf2, 0x39, 0xc0, 0x74, 0x12, 0x4e,
+ 0xb9, 0xd2, 0x42, 0x26, 0x68, 0xd2, 0xa3, 0xed, 0xe9, 0xe4, 0x22, 0x67, 0x10, 0x02, 0x5e, 0x24,
+ 0x63, 0x8e, 0xb6, 0xbb, 0x14, 0xbf, 0x83, 0x3e, 0x34, 0x8f, 0xb4, 0xe6, 0xa6, 0x7f, 0xfc, 0xaf,
+ 0x03, 0x39, 0x81, 0x0e, 0x9a, 0x3a, 0x9a, 0xc8, 0x2c, 0x31, 0xe4, 0x31, 0xb4, 0x98, 0x25, 0x43,
+ 0x11, 0xa3, 0xd1, 0xce, 0x41, 0x67, 0x77, 0x10, 0xed, 0x3a, 0x6f, 0xb4, 0x89, 0xc2, 0x7e, 0x4c,
+ 0xee, 0x42, 0x83, 0xe1, 0x09, 0x74, 0xe5, 0x51, 0x47, 0x05, 0x23, 0xe8, 0xa1, 0xee, 0x31, 0x1f,
+ 0x8a, 0x44, 0x18, 0x9b, 0xc0, 0x37, 0xb0, 0x21, 0xb4, 0xce, 0x58, 0x12, 0xf1, 0x30, 0xcd, 0x73,
+ 0x2e, 0x9b, 0x76, 0x30, 0xd0, 0x5e, 0xa1, 0x54, 0xe0, 0x72, 0x1f, 0xbc, 0x98, 0x19, 0x86, 0x0e,
+ 0x3a, 0x07, 0x2d, 0xab, 0x6b, 0xa1, 0xa7, 0xc8, 0x0d, 0xc6, 0xd0, 0xb9, 0x60, 0xe3, 0x8c, 0x9f,
+ 0xc9, 0x4c, 0x45, 0x9c, 0xdc, 0x83, 0x9a, 0xe2, 0x43, 0x67, 0x77, 0xae, 0x6b, 0x99, 0xe4, 0x11,
+ 0xd4, 0xa7, 0x56, 0xd5, 0x59, 0xea, 0xcd, 0x12, 0xca, 0x73, 0xa6, 0xb9, 0x94, 0xdc, 0x83, 0x56,
+ 0x2a, 0x35, 0xc6, 0x8c, 0x78, 0x79, 0x74, 0x46, 0x07, 0x3f, 0xc3, 0x06, 0x7a, 0x3b, 0xe6, 0xda,
+ 0x88, 0x84, 0x61, 0x5e, 0xff, 0xb1, 0xcb, 0x9f, 0xa0, 0x7e, 0xaa, 0xa4, 0x1c, 0xda, 0x06, 0xd0,
+ 0x62, 0x94, 0x77, 0x46, 0x97, 0xe2, 0x37, 0x79, 0x0c, 0xeb, 0x91, 0x4c, 0x8c, 0x92, 0x63, 0x87,
+ 0x96, 0x6b, 0x8f, 0x25, 0x2e, 0xf1, 0xa1, 0xc9, 0xe2, 0x58, 0x71, 0xad, 0xd1, 0x7e, 0x97, 0x16,
+ 0x64, 0xf0, 0xba, 0x06, 0x1b, 0x4f, 0xaf, 0x8d, 0x9c, 0x3c, 0x1d, 0xcb, 0xe8, 0xd5, 0x0b, 0xce,
+ 0x62, 0xae, 0xac, 0xfa, 0x62, 0x1f, 0x16, 0xa4, 0xad, 0xf7, 0x25, 0x17, 0xa3, 0xcb, 0x59, 0xbd,
+ 0x73, 0x8a, 0x3c, 0x81, 0xcd, 0x54, 0xf1, 0xa9, 0x90, 0x99, 0x0e, 0x07, 0xd6, 0x92, 0x6d, 0x9c,
+ 0xda, 0x12, 0x24, 0xbd, 0x42, 0x05, 0x7d, 0xf5, 0x63, 0x72, 0x1f, 0xda, 0x46, 0x4c, 0xb8, 0x36,
+ 0x6c, 0x92, 0x62, 0x2f, 0x7a, 0x74, 0xce, 0x20, 0x5f, 0xc3, 0xa6, 0x51, 0x2c, 0xd1, 0x2c, 0xb2,
+ 0x40, 0xe8, 0x50, 0x49, 0x69, 0xfc, 0xfa, 0x92, 0xcd, 0x8d, 0xb2, 0x0a, 0x95, 0xd2, 0x90, 0xef,
+ 0xe1, 0xb3, 0x12, 0x2f, 0xd4, 0x86, 0x99, 0x4c, 0x87, 0x97, 0x4c, 0x5f, 0xfa, 0x8d, 0xa5, 0xc3,
+ 0x77, 0x4a, 0x8a, 0x67, 0xa8, 0x87, 0x97, 0x7a, 0x1b, 0xea, 0x89, 0x4c, 0x22, 0xee, 0x37, 0x31,
+ 0xa4, 0x9c, 0xb0, 0xf8, 0x0f, 0x84, 0xd1, 0x7e, 0x0b, 0x99, 0xf8, 0x4d, 0x8e, 0x81, 0xdc, 0xf6,
+ 0xe5, 0xb7, 0xd1, 0xcd, 0x1d, 0xeb, 0xe6, 0x7c, 0xd9, 0x01, 0xdd, 0xbc, 0xe5, 0x33, 0xf8, 0xab,
+ 0x06, 0x9d, 0x4f, 0xf0, 0xff, 0x5f, 0xf0, 0x93, 0x07, 0xee, 0x86, 0xf9, 0x80, 0x07, 0xdb, 0x6e,
+ 0x1a, 0xc9, 0x21, 0x75, 0x37, 0x6f, 0x1b, 0xea, 0xfc, 0xca, 0x28, 0xe6, 0x77, 0xf0, 0xee, 0xe4,
+ 0x84, 0xbd, 0xb4, 0x91, 0x14, 0xc9, 0x80, 0x69, 0xee, 0x77, 0x51, 0x30, 0xa3, 0x83, 0x5f, 0x2b,
+ 0xd0, 0x3a, 0xbf, 0x7a, 0x67, 0x39, 0x77, 0xa0, 0xa7, 0xb9, 0x12, 0x6c, 0x2c, 0x6e, 0x78, 0x1c,
+ 0x6a, 0x71, 0xc3, 0x5d, 0x5d, 0xd7, 0xe7, 0xec, 0x33, 0x71, 0xc3, 0xed, 0x6e, 0xb0, 0x85, 0x09,
+ 0x15, 0x4b, 0x46, 0xdc, 0x8d, 0x08, 0x2c, 0x15, 0xb5, 0x0c, 0xb2, 0x03, 0xa0, 0xb8, 0xce, 0xc6,
+ 0x76, 0x5c, 0x6b, 0xdf, 0x7b, 0x58, 0x5b, 0x80, 0xb9, 0x9d, 0xcb, 0xfa, 0xb1, 0x0e, 0xf6, 0x61,
+ 0xfd, 0xfc, 0xea, 0x82, 0x2b, 0x31, 0xbc, 0xa6, 0xc8, 0x24, 0x0f, 0xa0, 0xe3, 0x4a, 0x34, 0x64,
+ 0x62, 0x8c, 0x01, 0xb6, 0x28, 0xe4, 0xac, 0xe7, 0x4c, 0x8c, 0x83, 0x21, 0x6c, 0xde, 0x42, 0xf1,
+ 0x2d, 0x29, 0x7d, 0x0b, 0x6b, 0x53, 0xb4, 0x5f, 0x54, 0xa3, 0x8a, 0xd1, 0x10, 0xac, 0xc6, 0x82,
+ 0x6b, 0xda, 0xcd, 0x15, 0xdd, 0x25, 0xf8, 0xb3, 0x02, 0xb5, 0x93, 0xec, 0x8a, 0x7c, 0x09, 0x4d,
+ 0x8d, 0xb3, 0x5c, 0xfb, 0x15, 0x3c, 0x8a, 0x43, 0xb3, 0x34, 0xe3, 0x69, 0x21, 0x27, 0x8f, 0xa0,
+ 0x99, 0x96, 0xc6, 0xde, 0xd2, 0x22, 0x29, 0x64, 0xe4, 0x47, 0xd8, 0xfe, 0x45, 0x98, 0x84, 0x6b,
+ 0x1d, 0xc6, 0xf3, 0xb9, 0x6d, 0x27, 0xa1, 0x35, 0xbf, 0x3d, 0x33, 0x5f, 0x1a, 0xea, 0x74, 0xcb,
+ 0x9d, 0x28, 0xf1, 0x34, 0xf9, 0x0a, 0x36, 0x0b, 0x43, 0x4c, 0x8d, 0xb2, 0x09, 0x4f, 0x4c, 0x8e,
+ 0x76, 0x97, 0x6e, 0x38, 0xc1, 0x51, 0xc1, 0x0f, 0x24, 0xb4, 0x9e, 0xb9, 0x76, 0x20, 0x3f, 0xc0,
+ 0xd6, 0x8a, 0x08, 0xdc, 0xca, 0x58, 0x1d, 0x00, 0xb9, 0x1d, 0x80, 0xbd, 0xaf, 0x4c, 0x0d, 0x84,
+ 0x51, 0x4c, 0x5d, 0xbb, 0x41, 0x3f, 0x67, 0x04, 0xaf, 0x2b, 0xd0, 0x78, 0x99, 0x99, 0x34, 0x33,
+ 0x64, 0x07, 0x1a, 0x39, 0x46, 0xce, 0xc5, 0x2d, 0x08, 0x9d, 0x98, 0x3c, 0x81, 0x9e, 0xdb, 0x14,
+ 0xe1, 0x5b, 0x90, 0x5c, 0xb1, 0x4d, 0xa4, 0x8a, 0x45, 0xc2, 0xc6, 0xae, 0x15, 0x0b, 0x32, 0x78,
+ 0x09, 0x40, 0xb9, 0x11, 0x8a, 0x5b, 0x0c, 0xde, 0x3f, 0x8c, 0x92, 0xc1, 0xea, 0xa2, 0xc1, 0xdf,
+ 0xab, 0xd0, 0xea, 0xbb, 0x07, 0x81, 0x6d, 0x73, 0x9c, 0x05, 0xf9, 0x34, 0x59, 0x5e, 0xb8, 0x6d,
+ 0x94, 0xe1, 0x04, 0x79, 0xcf, 0xb5, 0xfb, 0x86, 0xb2, 0xd4, 0x3e, 0xb0, 0x2c, 0x27, 0xe0, 0xcf,
+ 0xda, 0x02, 0xdf, 0x4c, 0xf1, 0xec, 0xd1, 0x83, 0x53, 0xb5, 0x73, 0xb0, 0x35, 0x0b, 0x60, 0xfe,
+ 0x1e, 0xa2, 0x77, 0x8b, 0x96, 0x59, 0x7a, 0x27, 0xad, 0xec, 0xb2, 0xfa, 0xea, 0x2e, 0x2b, 0x23,
+ 0xd7, 0x58, 0x44, 0xee, 0x8f, 0x0a, 0xd4, 0xcf, 0x52, 0x9e, 0xc4, 0x64, 0x0f, 0x7a, 0x3a, 0xe5,
+ 0x89, 0x09, 0x25, 0x76, 0xc7, 0xfc, 0x49, 0x37, 0xc7, 0x6e, 0x0d, 0x15, 0xf2, 0xee, 0xe9, 0xc7,
+ 0x6f, 0x02, 0xa6, 0xfa, 0x81, 0xc0, 0xac, 0xcc, 0xa4, 0xf6, 0xee, 0x4c, 0xbc, 0xc5, 0x4c, 0xfe,
+ 0xae, 0x40, 0xfd, 0xd9, 0x98, 0x89, 0xc9, 0xc7, 0x9e, 0x09, 0x09, 0xa0, 0x7b, 0xca, 0x47, 0x22,
+ 0x71, 0x47, 0x5c, 0x55, 0x17, 0x78, 0xc1, 0x6f, 0x55, 0xf0, 0x8e, 0x53, 0xa9, 0x3f, 0xfa, 0x64,
+ 0x09, 0x78, 0xe6, 0x3a, 0xe5, 0xf8, 0x64, 0x58, 0xa3, 0xf8, 0x6d, 0x79, 0x43, 0x25, 0x27, 0xd8,
+ 0xab, 0x6d, 0x8a, 0xdf, 0xf6, 0x4f, 0xc4, 0x48, 0xdc, 0xf5, 0x6d, 0x5a, 0x35, 0xd2, 0x6e, 0x5b,
+ 0x6d, 0xd8, 0x2b, 0xee, 0x36, 0x7d, 0x4e, 0xd8, 0x93, 0xf8, 0x17, 0xd0, 0xce, 0x4f, 0xda, 0xef,
+ 0x41, 0x03, 0xff, 0xc7, 0x0e, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x3c, 0x20, 0xe3, 0x9b,
+ 0x0d, 0x00, 0x00,
}
message Proof {
bytes sign = 1;
bytes controlProgram = 2;
+ bytes address = 3;
}
message BytomBlockHeader {
uint64 nonce = 7;
uint64 bits = 8;
TransactionStatus transaction_status = 9;
- bytes extra = 10;
}
message BlockHeader {
uint64 bits = 8;
TransactionStatus transaction_status = 9;
Proof Proof = 10;
+ bytes extra = 11;
+ bytes coinbase = 12;
}
message TxHeader {
repeated bytes witness_arguments = 3;
uint64 ordinal = 4;
repeated bytes Peginwitness = 5;
-}
\ No newline at end of file
+}
+
+message Dpos {
+ Hash spent_output_id = 1;
+ ValueDestination witness_destination = 2;
+ repeated bytes witness_arguments = 3;
+ uint64 ordinal = 4;
+ uint32 type = 5;
+ string from = 6;
+ string to = 7;
+ uint64 stake = 8;
+ string data = 9;
+}
mustWriteForHash(w, bh.Height)
mustWriteForHash(w, bh.PreviousBlockId)
mustWriteForHash(w, bh.Timestamp)
+ //mustWriteForHash(w, bh.Coinbase)
mustWriteForHash(w, bh.TransactionsRoot)
mustWriteForHash(w, bh.TransactionStatusHash)
mustWriteForHash(w, bh.Proof.Sign)
mustWriteForHash(w, bh.Proof.ControlProgram)
+ mustWriteForHash(w, bh.Extra)
}
// NewBlockHeader creates a new BlockHeader and populates
// its body.
-func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestamp uint64, transactionsRoot, transactionStatusHash *Hash, proof *Proof) *BlockHeader {
+func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestamp uint64, transactionsRoot, transactionStatusHash *Hash, proof *Proof, extra []byte, coinbase []byte) *BlockHeader {
return &BlockHeader{
Version: version,
Height: height,
TransactionStatusHash: transactionStatusHash,
TransactionStatus: nil,
Proof: proof,
+ Extra: extra,
+ Coinbase: coinbase,
}
}
--- /dev/null
+package bc
+
+import (
+ "io"
+)
+
+// typ returns the entry type tag used to identify this entry kind.
+func (Dpos) typ() string { return "dpos1" }
+// writeForHash writes the fields that contribute to the entry ID.
+// Only SpentOutputId, Type, From, To and Stake are hashed;
+// WitnessDestination, WitnessArguments, Ordinal and Data are not
+// written here. NOTE(review): confirm the excluded fields are meant
+// to be malleable without changing the entry ID.
+func (d *Dpos) writeForHash(w io.Writer) {
+ mustWriteForHash(w, d.SpentOutputId)
+ mustWriteForHash(w, d.Type)
+ mustWriteForHash(w, d.From)
+ mustWriteForHash(w, d.To)
+ mustWriteForHash(w, d.Stake)
+}
+
+// SetDestination links this Dpos entry to the value destination at the
+// given entry (id), value and position — mirroring how a spend is
+// wired to the mux.
+func (d *Dpos) SetDestination(id *Hash, val *AssetAmount, pos uint64) {
+ d.WitnessDestination = &ValueDestination{
+ Ref: id,
+ Value: val,
+ Position: pos,
+ }
+}
+
+// NewDpos creates a new Dpos entry for the given spent output.
+// WitnessDestination and WitnessArguments are left unset; callers wire
+// them up afterwards (see SetDestination).
+func NewDpos(spentOutputID *Hash, ordinal uint64, t uint32, stake uint64, from, to, data string) *Dpos {
+ return &Dpos{
+ SpentOutputId: spentOutputID,
+ Ordinal: ordinal,
+ Type: t,
+ From: from,
+ To: to,
+ Stake: stake,
+ Data: data,
+ }
+}
binary.LittleEndian.PutUint64(buf[:], v)
_, err := w.Write(buf[:])
return errors.Wrapf(err, "writing uint64 (%d) for hash", v)
+ case uint32:
+ // NOTE(review): the value is zero-padded to 8 bytes so a uint32
+ // hashes identically to the equivalent uint64 — confirm this is
+ // intended before shrinking the buffer, since changing the width
+ // changes every entry ID that hashes a uint32 field.
+ buf := [8]byte{}
+ binary.LittleEndian.PutUint32(buf[:], v)
+ _, err := w.Write(buf[:])
+ return errors.Wrapf(err, "writing uint32 (%d) for hash", v)
case []byte:
_, err := blockchain.WriteVarstr31(w, v)
return errors.Wrapf(err, "writing []byte (len %d) for hash", len(v))
type Proof struct {
Sign []byte
ControlProgram []byte
+ Address []byte
}
func (p *Proof) readFrom(r *blockchain.Reader) (err error) {
if p.ControlProgram, err = blockchain.ReadVarstr31(r); err != nil {
return err
}
+ if p.Address, err = blockchain.ReadVarstr31(r); err != nil {
+ return err
+ }
return nil
}
if _, err := blockchain.WriteVarstr31(w, p.ControlProgram); err != nil {
return err
}
+ if _, err := blockchain.WriteVarstr31(w, p.Address); err != nil {
+ return err
+ }
return nil
}
Height uint64 // The height of the block.
PreviousBlockHash bc.Hash // The hash of the previous block.
Timestamp uint64 // The time of the block in seconds.
+ Coinbase []byte
Proof Proof
+ Extra []byte
BlockCommitment
}
if bh.Timestamp, err = blockchain.ReadVarint63(r); err != nil {
return 0, err
}
+ if bh.Coinbase, err = blockchain.ReadVarstr31(r); err != nil {
+ return 0, err
+ }
if _, err = blockchain.ReadExtensibleString(r, bh.BlockCommitment.readFrom); err != nil {
return 0, err
}
if _, err = blockchain.ReadExtensibleString(r, bh.Proof.readFrom); err != nil {
return 0, err
}
+ if bh.Extra, err = blockchain.ReadVarstr31(r); err != nil {
+ return 0, err
+ }
return
}
if _, err = blockchain.WriteVarint63(w, bh.Timestamp); err != nil {
return err
}
+ if _, err := blockchain.WriteVarstr31(w, bh.Coinbase); err != nil {
+ return err
+ }
if _, err = blockchain.WriteExtensibleString(w, nil, bh.BlockCommitment.writeTo); err != nil {
return err
}
if _, err = blockchain.WriteExtensibleString(w, nil, bh.Proof.writeTo); err != nil {
return err
}
+ if _, err = blockchain.WriteVarstr31(w, bh.Extra); err != nil {
+ return err
+ }
return nil
}
--- /dev/null
+package types
+
+import (
+ "testing"
+
+ "github.com/vapor/protocol/bc"
+)
+
+// TestRawBlock decodes a known serialized block, checks that an
+// expected transaction ID is present in it, and re-hashes the header.
+func TestRawBlock(t *testing.T) {
+ strRawBlock := "0301629f91cdd7f923fbb5283390f3c52b3c2559fcf15de352bae10b7ea0fc50f3e650dfe89de0054066408c9a91229b63bbb2b7510887a0519dcc6446c5b045bb8444afa939368e4f6978a65b4ee5b6f4914fe5c05000459a803ecf59132604e5d334d64249c5e50a0ecc99b38080808080200207010001010502030039380001013effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa090fbd59901011600143143baf513dead7f3478479401eb1ed38eb1e19f00070100010160015e89a6c0f09e3e512f01dc1e4cf42d28eb67b76def47c59257448af26945f542d8ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc09af7934f0001160014cc443b29275271cd5e793430fdb7e57e3eae247663024024245117a930a6d398c854fb31bb5e3921d82e357d58778d72e0b4b1f0a3583d0d02a399b06ec5209c86b01f590c36fb29b3f2a3dbbe4b5f6bd9d852085967022013604b73c1df1aff3c2503eff8aa93498dcc7800051f74814123980e09e76a0702013dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0db90f3290116001497e164c8f82edc1412f7d87e37f84fb44b55c012000149ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80c8afa02501220020df81990a397ca4df7afb786e5fcd33951259e9b7341934c15c7362997a09c51e00"
+ block := &Block{}
+ err := block.UnmarshalText([]byte(strRawBlock))
+ if err != nil {
+ t.Errorf("UnmarshalText err : %s", err)
+ }
+ strTxID := "f7a046ac947efa7da14696c2aa667f4657802609a6aafc056ee4c8d7f4940a83"
+ txID := bc.Hash{}
+ // Previously this error was silently ignored; a bad literal would
+ // make the loop below compare against the zero hash.
+ if err := txID.UnmarshalText([]byte(strTxID)); err != nil {
+ t.Errorf("UnmarshalText txID err : %s", err)
+ }
+ found := false
+ for _, tx := range block.Transactions {
+ if tx.ID.String() == txID.String() {
+ found = true
+ break
+ }
+ }
+ // Previously the loop never failed the test when the expected
+ // transaction was missing from the decoded block.
+ if !found {
+ t.Errorf("expected transaction %s not found in block", strTxID)
+ }
+ block.Hash()
+}
"github.com/vapor/protocol/bc"
)
-type ClaimCommitment struct {
- bc.AssetAmount
- VMVersion uint64
- ControlProgram []byte
-}
// ClaimInput satisfies the TypedInput interface and represents a spend transaction.
type ClaimInput struct {
SpendCommitment
}
-// NewClaimInputInput create a new SpendInput struct.
-func NewClaimInputInput(arguments [][]byte, sourceID bc.Hash, assetID bc.AssetID, amount, sourcePos uint64, controlProgram []byte) *TxInput {
+// NewClaimInput create a new SpendInput struct.
+func NewClaimInput(arguments [][]byte, sourceID bc.Hash, assetID bc.AssetID, amount, sourcePos uint64, controlProgram []byte) *TxInput {
sc := SpendCommitment{
AssetAmount: bc.AssetAmount{
--- /dev/null
+package types
+
+import (
+ "github.com/vapor/protocol/bc"
+)
+
+/*
+const (
+ LoginCandidate DposType = iota
+ LogoutCandidate
+ Delegate
+ UnDelegate
+)
+*/
+
+// DposTx satisfies the TypedInput interface and represents a DPoS
+// transaction input (candidate login/logout, delegate, undelegate).
+type DposTx struct {
+ SpendCommitmentSuffix []byte // unconsumed suffix of the output commitment
+ Type TxType // DPoS operation (LoginCandidate, LogoutCandidate, Delegate, UnDelegate)
+ From string // source address, as a string
+ To string // destination address, as a string
+ Amount uint64 // asset amount moved by this input
+ Stake uint64 // staked amount for the DPoS operation
+ PaymentAmount uint64 // NOTE(review): never set by NewDpos below — confirm who populates it
+ Arguments [][]byte // witness arguments
+ Info string // vote info string (set for Delegate in NewDpos)
+ SpendCommitment
+}
+
+// NewDpos builds a TxInput carrying a DposTx for the given DPoS
+// operation type. Only the Delegate case sets the vote info string;
+// the other cases deliberately leave it empty.
+// NOTE(review): PaymentAmount is never populated here and stays 0 —
+// confirm callers set it separately if it is meaningful.
+func NewDpos(arguments [][]byte, from, to string, sourceID bc.Hash, assetID bc.AssetID, stake, amount, sourcePos uint64, controlProgram []byte, t TxType) *TxInput {
+ var vote string
+ switch t {
+ case LoginCandidate:
+ case LogoutCandidate:
+ case Delegate:
+ vote = "vapor:1:event:vote"
+ case UnDelegate:
+ }
+ sc := SpendCommitment{
+ AssetAmount: bc.AssetAmount{
+ AssetId: &assetID,
+ Amount: amount,
+ },
+ SourceID: sourceID,
+ SourcePosition: sourcePos,
+ VMVersion: 1,
+ ControlProgram: controlProgram,
+ }
+
+ return &TxInput{
+ AssetVersion: 1,
+ TypedInput: &DposTx{
+ SpendCommitment: sc,
+ Type: t,
+ Amount: amount,
+ Arguments: arguments,
+ Info: vote,
+ Stake: stake,
+ From: from,
+ To: to,
+ },
+ }
+}
+
+// InputType is the interface function for return the input type.
+func (si *DposTx) InputType() uint8 { return DposInputType }
ord = 0
case *bc.Claim:
ord = 0
+ case *bc.Dpos:
+ ord = e.Ordinal
+ //spentOutputIDs[*e.SpentOutputId] = true
+ /*
+ if *e.WitnessDestination.Value.AssetId == *consensus.BTMAssetID {
+ tx.GasInputIDs = append(tx.GasInputIDs, id)
+ }
+ */
+ continue
default:
continue
}
var (
spends []*bc.Spend
+ dposs []*bc.Dpos
issuances []*bc.Issuance
coinbase *bc.Coinbase
claim *bc.Claim
Ref: &claimID,
Value: &inp.AssetAmount,
}
+ case *DposTx:
+ // create entry for prevout
+ prog := &bc.Program{VmVersion: inp.VMVersion, Code: inp.ControlProgram}
+ src := &bc.ValueSource{
+ Ref: &inp.SourceID,
+ Value: &inp.AssetAmount,
+ Position: inp.SourcePosition,
+ }
+ prevout := bc.NewOutput(src, prog, 0) // ordinal doesn't matter for prevouts, only for result outputs
+ prevoutID := addEntry(prevout)
+ // create entry for dpos
+ dpos := bc.NewDpos(&prevoutID, uint64(i), uint32(inp.Type), inp.Stake, inp.From, inp.To, inp.Info)
+ dpos.WitnessArguments = inp.Arguments
+ dposID := addEntry(dpos)
+ // setup mux
+ muxSources[i] = &bc.ValueSource{
+ Ref: &dposID,
+ Value: &inp.AssetAmount,
+ }
+ dposs = append(dposs, dpos)
}
}
issuance.SetDestination(&muxID, issuance.Value, issuance.Ordinal)
}
+ // connect the inputs to the mux
+ for _, dpos := range dposs {
+ spentOutput := entryMap[*dpos.SpentOutputId].(*bc.Output)
+ dpos.SetDestination(&muxID, spentOutput.Source.Value, dpos.Ordinal)
+ }
+
if coinbase != nil {
coinbase.SetDestination(&muxID, mux.Sources[0].Value, 0)
}
r := bc.NewRetirement(src, uint64(i))
resultID = addEntry(r)
} else {
+
// non-retirement
prog := &bc.Program{out.VMVersion, out.ControlProgram}
o := bc.NewOutput(src, prog, uint64(i))
func mapBlockHeader(old *BlockHeader) (bc.Hash, *bc.BlockHeader) {
proof := &bc.Proof{Sign: old.Proof.Sign, ControlProgram: old.Proof.ControlProgram}
- bh := bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.Timestamp, &old.TransactionsMerkleRoot, &old.TransactionStatusHash, proof)
+ bh := bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.Timestamp, &old.TransactionsMerkleRoot, &old.TransactionStatusHash, proof, old.Extra, old.Coinbase)
return bc.EntryID(bh), bh
}
"github.com/vapor/protocol/bc"
)
+// TxType identifies the DPoS operation carried by a transaction input.
+// NOTE(review): the zero value Binary appears to denote an ordinary
+// (non-DPoS) input — confirm against the serialization code.
+type TxType uint8
+
+const (
+ Binary TxType = iota
+ LoginCandidate
+ LogoutCandidate
+ Delegate
+ UnDelegate
+)
+
// SpendInput satisfies the TypedInput interface and represents a spend transaction.
type SpendInput struct {
SpendCommitmentSuffix []byte // The unconsumed suffix of the output commitment
SpendInputType
CoinbaseInputType
ClainPeginInputType
+ DposInputType
)
type (
return inp.AssetAmount
case *ClaimInput:
return inp.AssetAmount
+ case *DposTx:
+ return inp.AssetAmount
}
return bc.AssetAmount{}
}
return *inp.AssetId
case *ClaimInput:
return *inp.AssetId
+ case *DposTx:
+ return *inp.AssetId
}
return bc.AssetID{}
return inp.Amount
case *ClaimInput:
return inp.Amount
+ case *DposTx:
+ return inp.Amount
}
return 0
}
return inp.Arguments
case *ClaimInput:
return inp.Arguments
+ case *DposTx:
+ return inp.Arguments
}
return nil
}
inp.Arguments = args
case *ClaimInput:
inp.Arguments = args
+ case *DposTx:
+ inp.Arguments = args
}
}
if ci.Arbitrary, err = blockchain.ReadVarstr31(r); err != nil {
return err
}
-
+ case DposInputType:
+ ci := new(DposTx)
+ t.TypedInput = ci
+ if ci.SpendCommitmentSuffix, err = ci.SpendCommitment.readFrom(r, 1); err != nil {
+ return err
+ }
default:
return fmt.Errorf("unsupported input type %d", icType[0])
}
if inp.Arguments, err = blockchain.ReadVarstrList(r); err != nil {
return err
}
+ case *DposTx:
+ txType := uint64(0)
+ if txType, err = blockchain.ReadVarint63(r); err != nil {
+ return err
+ }
+ inp.Type = TxType(txType)
+ var from []byte
+ if from, err = blockchain.ReadVarstr31(r); err != nil {
+ return err
+ }
+ inp.From = string(from)
+ var to []byte
+ if to, err = blockchain.ReadVarstr31(r); err != nil {
+ return err
+ }
+ inp.To = string(to)
+ if inp.Amount, err = blockchain.ReadVarint63(r); err != nil {
+ return err
+ }
+ if inp.Stake, err = blockchain.ReadVarint63(r); err != nil {
+ return err
+ }
+ if inp.PaymentAmount, err = blockchain.ReadVarint63(r); err != nil {
+ return err
+ }
+ if inp.Arguments, err = blockchain.ReadVarstrList(r); err != nil {
+ return err
+ }
+
+ var info []byte
+ if info, err = blockchain.ReadVarstr31(r); err != nil {
+ return err
+ }
+ inp.Info = string(info)
}
return nil
})
if _, err = blockchain.WriteVarstr31(w, inp.Arbitrary); err != nil {
return errors.Wrap(err, "writing coinbase arbitrary")
}
+ case *DposTx:
+ if _, err = w.Write([]byte{DposInputType}); err != nil {
+ return err
+ }
+ return inp.SpendCommitment.writeExtensibleString(w, inp.SpendCommitmentSuffix, t.AssetVersion)
}
return nil
}
_, err := blockchain.WriteVarstrList(w, inp.Arguments)
return err
+ case *DposTx:
+ if _, err := blockchain.WriteVarint63(w, uint64(inp.Type)); err != nil {
+ return err
+ }
+ if _, err := blockchain.WriteVarstr31(w, []byte(inp.From)); err != nil {
+ return err
+ }
+ if _, err := blockchain.WriteVarstr31(w, []byte(inp.To)); err != nil {
+ return err
+ }
+ if _, err := blockchain.WriteVarint63(w, inp.Amount); err != nil {
+ return err
+ }
+ if _, err := blockchain.WriteVarint63(w, inp.Stake); err != nil {
+ return err
+ }
+ if _, err := blockchain.WriteVarint63(w, inp.PaymentAmount); err != nil {
+ return err
+ }
+ if _, err := blockchain.WriteVarstrList(w, inp.Arguments); err != nil {
+ return err
+ }
+ _, err := blockchain.WriteVarstr31(w, []byte(inp.Info))
+
+ return err
}
return nil
}
bcBlock := types.MapBlock(block)
parent := c.index.GetNode(&block.PreviousBlockHash)
- if err := validation.ValidateBlock(bcBlock, parent, block, c.Authoritys, c.position); err != nil {
+ if err := validation.ValidateBlock(bcBlock, parent, block, c, c.engine, c.Authoritys, c.position); err != nil {
return errors.Sub(ErrBadBlock, err)
}
if err := c.store.SaveBlock(block, bcBlock.TransactionStatus); err != nil {
log "github.com/sirupsen/logrus"
"github.com/vapor/config"
+ engine "github.com/vapor/consensus/consensus"
"github.com/vapor/errors"
"github.com/vapor/protocol/bc"
"github.com/vapor/protocol/bc/types"
bestNode *state.BlockNode
Authoritys map[string]string
position uint64
+ engine engine.Engine
}
// NewChain returns a new Chain using store as the underlying storage.
c.position = position
}
+// GetAuthoritys returns the authority entry stored under key in the
+// chain's Authoritys map (the zero value "" when the key is absent).
+// NOTE(review): the name violates Go conventions (no "Get" prefix;
+// "Authoritys" should be "Authorities") but is kept for caller
+// compatibility with the existing Authoritys field.
+func (c *Chain) GetAuthoritys(key string) string {
+ return c.Authoritys[key]
+}
+
+// SetConsensusEngine wires the consensus engine the chain passes into
+// block validation (see the engine argument of ValidateBlock).
+// NOTE(review): the parameter name shadows the imported package alias
+// "engine"; consider renaming the parameter in a follow-up.
+func (c *Chain) SetConsensusEngine(engine engine.Engine) {
+ c.engine = engine
+}
+
func (c *Chain) initChainStatus() error {
genesisBlock := config.GenesisBlock()
txStatus := bc.NewTransactionStatus()
TransactionsMerkleRoot bc.Hash
TransactionStatusHash bc.Hash
Proof bc.Proof
+ Coinbase []byte
+ Extra []byte
}
func NewBlockNode(bh *types.BlockHeader, parent *BlockNode) (*BlockNode, error) {
Sign: bh.Proof.Sign,
ControlProgram: bh.Proof.ControlProgram,
},
+ Coinbase: bh.Coinbase,
+ Extra: bh.Extra,
}
/*
if bh.Height == 0 {
TransactionsMerkleRoot: node.TransactionsMerkleRoot,
TransactionStatusHash: node.TransactionStatusHash,
},
+ Coinbase: node.Coinbase,
+ Extra: node.Extra,
}
}
IsWithdrawSpent(hash *bc.Hash) bool
SetWithdrawSpent(hash *bc.Hash)
+
+ Set(hash *bc.Hash, data []byte) error
+ Get(hash *bc.Hash) ([]byte, error)
}
// BlockStoreState represents the core's db status
log.WithFields(log.Fields{"module": "ProcessTransaction", "error": "pegin-already-claimed"}).Error("ProcessTransaction error")
return false, errors.New("pegin-already-claimed")
}
-
txD := &TxDesc{
Tx: tx,
StatusFail: statusFail,
atomic.StoreInt64(&tp.lastUpdated, time.Now().Unix())
tp.msgCh <- &TxPoolMsg{TxDesc: txD, MsgType: MsgNewTx}
+
log.WithField("tx_id", tx.ID.String()).Debug("Add tx to mempool")
return nil
}
package validation
import (
- "encoding/hex"
"time"
log "github.com/sirupsen/logrus"
+ "github.com/vapor/chain"
"github.com/vapor/consensus"
- "github.com/vapor/crypto/ed25519/chainkd"
+ engine "github.com/vapor/consensus/consensus"
"github.com/vapor/errors"
"github.com/vapor/protocol/bc"
"github.com/vapor/protocol/bc/types"
}
// ValidateBlockHeader check the block's header
-func ValidateBlockHeader(b *bc.Block, parent *state.BlockNode) error {
+// ValidateBlockHeader checks the block's header against its parent:
+// the version must not regress, the height must increase by exactly
+// one, the parent hash must match, and the timestamp must satisfy
+// checkBlockTime; finally the consensus engine's VerifyHeader applies
+// engine-specific header rules.
+// NOTE(review): the old PoW difficulty (Bits) check is removed here;
+// engine.VerifyHeader is presumably the replacement — confirm against
+// the DPoS engine implementation.
+func ValidateBlockHeader(b *bc.Block, block *types.Block, parent *state.BlockNode, c chain.Chain, engine engine.Engine) error {
if b.Version < parent.Version {
return errors.WithDetailf(errVersionRegression, "previous block verson %d, current block version %d", parent.Version, b.Version)
}
if b.Height != parent.Height+1 {
return errors.WithDetailf(errMisorderedBlockHeight, "previous block height %d, current block height %d", parent.Height, b.Height)
}
- if b.Bits != parent.CalcNextBits() {
- return errBadBits
- }
if parent.Hash != *b.PreviousBlockId {
return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", parent.Hash.Bytes(), b.PreviousBlockId.Bytes())
}
if err := checkBlockTime(b, parent); err != nil {
return err
}
+ if err := engine.VerifyHeader(c, &block.BlockHeader, false); err != nil {
+ return err
+ }
+
return nil
}
// ValidateBlock validates a block and the transactions within.
-func ValidateBlock(b *bc.Block, parent *state.BlockNode, block *types.Block, authoritys map[string]string, position uint64) error {
+func ValidateBlock(b *bc.Block, parent *state.BlockNode, block *types.Block, c chain.Chain, engine engine.Engine, authoritys map[string]string, position uint64) error {
startTime := time.Now()
- if err := ValidateBlockHeader(b, parent); err != nil {
+ if err := ValidateBlockHeader(b, block, parent, c, engine); err != nil {
return err
}
- time.Sleep(3 * time.Second)
- // 验证出块人
- controlProgram := hex.EncodeToString(block.Proof.ControlProgram)
- xpub := &chainkd.XPub{}
- xpub.UnmarshalText([]byte(authoritys[controlProgram]))
-
- msg := block.BlockCommitment.TransactionsMerkleRoot.Bytes()
- if !xpub.Verify(msg, block.Proof.Sign) {
- return errors.New("Verification signature failed")
- }
+ /*
+ time.Sleep(3 * time.Second)
+ // verify the block producer (signer)
+ controlProgram := hex.EncodeToString(block.Proof.ControlProgram)
+ xpub := &chainkd.XPub{}
+ xpub.UnmarshalText([]byte(authoritys[controlProgram]))
+ msg := block.BlockCommitment.TransactionsMerkleRoot.Bytes()
+ if !xpub.Verify(msg, block.Proof.Sign) {
+ return errors.New("Verification signature failed")
+ }
+ */
blockGasSum := uint64(0)
coinbaseAmount := consensus.BlockSubsidy(b.BlockHeader.Height)
b.TransactionStatus = bc.NewTransactionStatus()
-
for i, tx := range b.Transactions {
gasStatus, err := ValidateTx(tx, b)
if !gasStatus.GasValid {
case *bc.Mux:
parity := make(map[bc.AssetID]int64)
for i, src := range e.Sources {
+ e, ok := vs.tx.Entries[*src.Ref]
+ if !ok {
+ return errors.Wrapf(bc.ErrMissingEntry, "entry for bytom input %x not found", *src.Ref)
+ }
+ switch e.(type) {
+ case *bc.Dpos:
+ continue
+ default:
+ }
+
if src.Value.Amount > math.MaxInt64 {
return errors.WithDetailf(ErrOverflow, "amount %d exceeds maximum value 2^63", src.Value.Amount)
}
if !ok {
return errors.Wrapf(bc.ErrMissingEntry, "entry for bytom input %x not found", BTMInputID)
}
-
vs2 := *vs
vs2.entryID = BTMInputID
if err := checkValid(&vs2, e); err != nil {
if err != nil {
return errors.Wrap(err, "getting spend prevout")
}
-
gasLeft, err := vm.Verify(NewTxVMContext(vs, e, spentOutput.ControlProgram, e.WitnessArguments), vs.gasStatus.GasLeft)
if err != nil {
return errors.Wrap(err, "checking control program")
return errors.Wrap(err, "checking spend destination")
}
vs.gasStatus.GasValid = true
+ case *bc.Dpos:
+ // NOTE(review): Dpos entries are accepted here without a control-program check — confirm this is intended.
default:
return fmt.Errorf("entry has unexpected type %T", e)
}
}
func IsValidPeginWitness(peginWitness [][]byte, prevout bc.Output) (err error) {
- /*
- assetID := bc.AssetID{}
- assetID.V0 = prevout.Source.Value.AssetId.GetV0()
- assetID.V1 = prevout.Source.Value.AssetId.GetV1()
- assetID.V2 = prevout.Source.Value.AssetId.GetV2()
- assetID.V3 = prevout.Source.Value.AssetId.GetV3()
- //bytomPrevout.Source.Value.AssetId = &assetId
-
- sourceID := bc.Hash{}
- sourceID.V0 = prevout.Source.Ref.GetV0()
- sourceID.V1 = prevout.Source.Ref.GetV1()
- sourceID.V2 = prevout.Source.Ref.GetV2()
- sourceID.V3 = prevout.Source.Ref.GetV3()
- */
assetAmount := &bc.AssetAmount{
AssetId: prevout.Source.Value.AssetId,
if !consensus.MoneyRange(amount) {
return errors.New("Amount out of range")
}
-
- if len(peginWitness[1]) != 64 {
- return errors.New("The length of gennesisBlockHash is not correct")
- }
-
+ /*
+ if len(peginWitness[1]) != 32 {
+ return errors.New("The length of genesisBlockHash is not correct")
+ }
+ */
claimScript := peginWitness[2]
rawTx := &bytomtypes.Tx{}
if err = checkPeginTx(rawTx, bytomPrevout, amount, claimScript); err != nil {
return err
}
-
+ var b bc.Hash
+ b.UnmarshalText(peginWitness[1])
// Check the genesis block corresponds to a valid peg (only one for now)
- if !bytes.Equal(peginWitness[1], []byte(consensus.ActiveNetParams.ParentGenesisBlockHash)) {
+ if b.String() != consensus.ActiveNetParams.ParentGenesisBlockHash {
return errors.New("ParentGenesisBlockHash don't match")
}
// TODO Finally, validate peg-in via rpc call
return errors.Wrapf(ErrPosition, "invalid position %d for coinbase source", vs.Position)
}
dest = ref.WitnessDestination
+ case *bc.Dpos:
+ if vs.Position != 0 {
+ return errors.Wrapf(ErrPosition, "invalid position %d for coinbase source", vs.Position)
+ }
+ dest = ref.WitnessDestination
default:
return errors.Wrapf(bc.ErrEntryType, "value source is %T, should be coinbase, issuance, spend, or mux", e)
}
package vm
-import "bytes"
+import (
+ "bytes"
+)
func opInvert(vm *virtualMachine) error {
err := vm.applyCost(1)
fmt.Fprintf(TraceOut, " stack %d: %x\n", len(vm.dataStack)-1-i, vm.dataStack[i])
}
}
-
return nil
}
package wallet
import (
+ "bytes"
"encoding/json"
"fmt"
"sort"
chainjson "github.com/vapor/encoding/json"
"github.com/vapor/protocol/bc"
"github.com/vapor/protocol/bc/types"
+ "github.com/vapor/protocol/vm/vmutil"
)
const (
// filterAccountTxs related and build the fully annotated transactions.
func (w *Wallet) filterAccountTxs(b *types.Block, txStatus *bc.TransactionStatus) []*query.AnnotatedTx {
annotatedTxs := make([]*query.AnnotatedTx, 0, len(b.Transactions))
-
+ redeemContract := w.dposAddress.ScriptAddress()
+ program, _ := vmutil.P2WPKHProgram(redeemContract)
transactionLoop:
for pos, tx := range b.Transactions {
statusFail, _ := txStatus.GetStatus(pos)
for _, v := range tx.Outputs {
+
+ if bytes.Equal(v.ControlProgram, program) {
+ annotatedTxs = append(annotatedTxs, w.buildAnnotatedTransaction(tx, b, statusFail, pos))
+ continue transactionLoop
+ }
+
var hash [32]byte
sha3pool.Sum256(hash[:], v.ControlProgram)
if bytes := w.DB.Get(account.ContractKey(hash)); bytes != nil {
}
for _, v := range tx.Inputs {
+ if bytes.Equal(v.ControlProgram(), program) {
+ annotatedTxs = append(annotatedTxs, w.buildAnnotatedTransaction(tx, b, statusFail, pos))
+ continue transactionLoop
+ }
+
outid, err := v.SpentOutputID()
if err != nil {
continue
import (
"encoding/json"
+ "fmt"
log "github.com/sirupsen/logrus"
"github.com/tendermint/tmlibs/db"
"github.com/vapor/errors"
"github.com/vapor/protocol/bc"
"github.com/vapor/protocol/bc/types"
+ "github.com/vapor/protocol/vm/vmutil"
)
// GetAccountUtxos return all account unspent outputs
}
func (w *Wallet) attachUtxos(batch db.Batch, b *types.Block, txStatus *bc.TransactionStatus) {
+ /*
+ a := bc.Hash{}
+ a.UnmarshalText([]byte("bef9c83e5cadc6dbb80b81387f3e3c3fadd76b917e5337f5442b9ef071c06526"))
+ batch.Delete(account.StandardUTXOKey(a))
+ a.UnmarshalText([]byte("1a5e2141a12823dabf343b5ace0a181a3d018e24f3dc6e7c3704b66fc040ca7b"))
+ batch.Delete(account.StandardUTXOKey(a))
+ a.UnmarshalText([]byte("4647b1e0893f56438f9bbde6134840f1595da799cfc6ece77c4d9aabdf9cfe50"))
+ batch.Delete(account.StandardUTXOKey(a))
+ a.UnmarshalText([]byte("928094d14b00aaf674ee291bbfb0c843a4dab53984f6235b998338fe0fa2d688"))
+ batch.Delete(account.StandardUTXOKey(a))
+ a.UnmarshalText([]byte("e20aee90018f8b6483d5590786fcf495bccfa7f1a3a5a5a9106c4143f71d49a4"))
+ batch.Delete(account.StandardUTXOKey(a))
+ a.UnmarshalText([]byte("2cb18fe2dd3eb8dcf2df43aa6650851dd0b6de291bfffd151a36703c92f8e864"))
+ batch.Delete(account.StandardUTXOKey(a))
+ a.UnmarshalText([]byte("48d71e6da11de69983b0cc79787f0f9422a144c94e687dfec11b4a57fdca2832"))
+ batch.Delete(account.StandardUTXOKey(a))
+ */
for txIndex, tx := range b.Transactions {
statusFail, err := txStatus.GetStatus(txIndex)
if err != nil {
func (w *Wallet) filterAccountUtxo(utxos []*account.UTXO) []*account.UTXO {
outsByScript := make(map[string][]*account.UTXO, len(utxos))
+ redeemContract := w.dposAddress.ScriptAddress()
+ program, _ := vmutil.P2WPKHProgram(redeemContract)
+ isDposAddress := false
for _, utxo := range utxos {
scriptStr := string(utxo.ControlProgram)
outsByScript[scriptStr] = append(outsByScript[scriptStr], utxo)
sha3pool.Sum256(hash[:], []byte(s))
data := w.DB.Get(account.ContractKey(hash))
if data == nil {
- continue
- }
+ if s == string(program) {
+ isDposAddress = true
+ } else {
+ continue
+ }
- cp := &account.CtrlProgram{}
- if err := json.Unmarshal(data, cp); err != nil {
- log.WithField("err", err).Error("filterAccountUtxo fail on unmarshal control program")
- continue
}
- for _, utxo := range outsByScript[s] {
- utxo.AccountID = cp.AccountID
- utxo.Address = cp.Address
- utxo.ControlProgramIndex = cp.KeyIndex
- utxo.Change = cp.Change
- result = append(result, utxo)
+ if !isDposAddress {
+ cp := &account.CtrlProgram{}
+ if err := json.Unmarshal(data, cp); err != nil {
+ log.WithField("err", err).Error("filterAccountUtxo fail on unmarshal control program")
+ continue
+ }
+ for _, utxo := range outsByScript[s] {
+ utxo.AccountID = cp.AccountID
+ utxo.Address = cp.Address
+ utxo.ControlProgramIndex = cp.KeyIndex
+ utxo.Change = cp.Change
+ result = append(result, utxo)
+ }
+ } else {
+ for _, utxo := range outsByScript[s] {
+ utxo.Address = w.dposAddress.EncodeAddress()
+ result = append(result, utxo)
+ }
+ isDposAddress = false
}
}
return result
if err != nil {
continue
}
-
resOut, err := tx.Output(*sp.SpentOutputId)
if err != nil {
log.WithField("err", err).Error("txInToUtxos fail on get resOut")
}
if statusFail && *resOut.Source.Value.AssetId != *consensus.BTMAssetID {
+ fmt.Println("statusFail:", statusFail)
continue
}
"github.com/vapor/account"
"github.com/vapor/asset"
"github.com/vapor/blockchain/pseudohsm"
+ "github.com/vapor/common"
"github.com/vapor/protocol"
"github.com/vapor/protocol/bc"
"github.com/vapor/protocol/bc/types"
chain *protocol.Chain
RecoveryMgr *recoveryManager
rescanCh chan struct{}
+ dposAddress common.Address
}
//NewWallet return a new wallet instance
-func NewWallet(walletDB db.DB, account *account.Manager, asset *asset.Registry, hsm *pseudohsm.HSM, chain *protocol.Chain) (*Wallet, error) {
+func NewWallet(walletDB db.DB, account *account.Manager, asset *asset.Registry, hsm *pseudohsm.HSM, chain *protocol.Chain, dposAddress common.Address) (*Wallet, error) {
w := &Wallet{
DB: walletDB,
AccountMgr: account,
Hsm: hsm,
RecoveryMgr: newRecoveryManager(walletDB, account),
rescanCh: make(chan struct{}, 1),
+ dposAddress: dposAddress,
}
if err := w.loadWalletInfo(); err != nil {