OSDN Git Service

add query API
authorroot <zhouarrogant@outlook.com>
Thu, 7 Sep 2017 07:34:26 +0000 (07:34 +0000)
committerroot <zhouarrogant@outlook.com>
Thu, 7 Sep 2017 07:34:26 +0000 (07:34 +0000)
101 files changed:
README.md
blockchain/account/accounts.go
blockchain/mempool.go [new file with mode: 0644]
blockchain/mempool_test.go [new file with mode: 0644]
blockchain/pool_test.go
blockchain/reactor.go
blockchain/request.go [new file with mode: 0644]
blockchain/signers/signers.go
blockchain/transact.go [new file with mode: 0644]
cmd/bytom/Readme.md
cmd/bytom/commands/init.go
cmd/bytom/commands/run_node.go
cmd/bytom/test.sh
cmd/bytom/test/.blockchain/genesis.json [deleted file]
cmd/bytom/test/.blockchain/priv_validator.json [deleted file]
cmd/bytom/test/.blockchain1/genesis.json [deleted file]
cmd/bytom/test/.blockchain1/priv_validator.json [deleted file]
cmd/bytom/test/.bytom0/config.toml [moved from cmd/bytom/test/.blockchain/config.toml with 88% similarity]
cmd/bytom/test/.bytom0/genesis.json [new file with mode: 0644]
cmd/bytom/test/.bytom1/config.toml [moved from cmd/bytom/test/.blockchain1/config.toml with 88% similarity]
cmd/bytom/test/.bytom1/genesis.json [new file with mode: 0644]
cmd/bytomcli/example/issue.go [new file with mode: 0644]
cmd/bytomcli/issue.py [new file with mode: 0644]
cmd/bytomcli/main.go
config/config.go
config/config_test.go
config/toml.go
node/node.go
protocol/bc/bc.pb.go
protocol/bc/bc.proto
protocol/bc/blockheader.go
protocol/bc/blockheader_test.go [new file with mode: 0644]
protocol/bc/coinbase.go [new file with mode: 0644]
protocol/bc/entry_test.go
protocol/bc/legacy/block_commitment.go
protocol/bc/legacy/block_header.go
protocol/bc/legacy/block_test.go
protocol/bc/legacy/block_witness.go [deleted file]
protocol/bc/legacy/map.go
protocol/bc/legacy/map_test.go
protocol/bc/legacy/transaction.go
protocol/bc/legacy/transaction_test.go
protocol/bc/legacy/tx_test.go
protocol/bc/nonce.go
protocol/bc/timerange.go [deleted file]
protocol/bc/tx.go
protocol/bc/txheader.go
protocol/block.go
protocol/block_test.go
protocol/prottest/block.go
protocol/prottest/block_test.go
protocol/prottest/memstore/memstore.go
protocol/recover_test.go
protocol/state/snapshot.go
protocol/tx.go
protocol/tx_test.go
protocol/validation/block_test.go
protocol/validation/fuzz_test.go [deleted file]
protocol/validation/validation.go
protocol/validation/validation_test.go
protocol/validation/vmcontext.go
protocol/vm/context.go
protocol/vm/crypto.go
protocol/vm/crypto_test.go
protocol/vm/doc.go [deleted file]
protocol/vm/errors.go
protocol/vm/introspection.go
protocol/vm/introspection_test.go
protocol/vm/ops.go
protocol/vm/vm.go
protocol/vm/vm_test.go
protocol/vm/vmutil/script.go
protocol/vm/vmutil/script_test.go
rpc/core/types/responses.go
testutil/deepequal.go [new file with mode: 0644]
testutil/deepequal_test.go [new file with mode: 0644]
testutil/expect.go [new file with mode: 0644]
testutil/keys.go [new file with mode: 0644]
types/block.go [deleted file]
types/block_meta.go [deleted file]
types/canonical_json.go [deleted file]
types/events.go
types/genesis.go
types/keys.go [deleted file]
types/part_set.go [deleted file]
types/part_set_test.go [deleted file]
types/priv_validator.go [deleted file]
types/priv_validator_test.go [deleted file]
types/proposal.go [deleted file]
types/proposal_test.go [deleted file]
types/protobuf.go [deleted file]
types/services.go [deleted file]
types/tx.go [deleted file]
types/tx_test.go [deleted file]
types/validator.go [deleted file]
types/validator_set.go [deleted file]
types/validator_set_test.go [deleted file]
types/vote.go [deleted file]
types/vote_set.go [deleted file]
types/vote_set_test.go [deleted file]
types/vote_test.go [deleted file]

index c1ed67b..1df7956 100644 (file)
--- a/README.md
+++ b/README.md
 ## p2p & grpc test (两个节点测试)
 ``` console
 1. cd ./cmd/bytom
-2. ./test.sh node1
-3. ./test.sh node2
+2. ./test.sh bytom0
+3. ./test.sh bytom1
 4. curl -X POST --data '{"jsonrpc":"2.0", "method": "net_info", "params":[], "id":"67"}' http://127.0.0.1:46657
 ```
 ## bytomcli & bytom test
 ``` console
 1. cd ./cmd/bytom
-2. ./test.sh node1
+2. ./test.sh bytom0
 3. cd ./cmd/bytomcli
 4. ./bytomcli <command> <opt...>
 ```
index 219f111..8fae8d3 100644 (file)
@@ -10,7 +10,7 @@ import (
     "fmt"
 
        "github.com/golang/groupcache/lru"
-       "github.com/lib/pq"
+       //"github.com/lib/pq"
 
 //     "chain/core/pin"
        "github.com/bytom/blockchain/signers"
@@ -298,7 +298,7 @@ func (m *Manager) CreateControlProgram(ctx context.Context, accountID string, ch
 
 
 func (m *Manager) insertAccountControlProgram(ctx context.Context, progs ...*controlProgram) error {
-       const q = `
+       /*const q = `
                INSERT INTO account_control_programs (signer_id, key_index, control_program, change, expires_at)
                SELECT unnest($1::text[]), unnest($2::bigint[]), unnest($3::bytea[]), unnest($4::boolean[]),
                        unnest($5::timestamp with time zone[])
@@ -308,18 +308,18 @@ func (m *Manager) insertAccountControlProgram(ctx context.Context, progs ...*con
                keyIndexes   pq.Int64Array
                controlProgs pq.ByteaArray
                change       pq.BoolArray
-       //      expirations  []stdsql.NullString
+               expirations  []stdsql.NullString
        )
        for _, p := range progs {
                accountIDs = append(accountIDs, p.accountID)
                keyIndexes = append(keyIndexes, int64(p.keyIndex))
                controlProgs = append(controlProgs, p.controlProgram)
                change = append(change, p.change)
-       /*      expirations = append(expirations, stdsql.NullString{
+               expirations = append(expirations, stdsql.NullString{
                        String: p.expiresAt.Format(time.RFC3339),
                        Valid:  !p.expiresAt.IsZero(),
-               })*/
-       }
+               })
+       }*/
 
 //     _, err := m.dbm.ExecContext(ctx, q, accountIDs, keyIndexes, controlProgs, change, pq.Array(expirations))
        return errors.Wrap(nil)
diff --git a/blockchain/mempool.go b/blockchain/mempool.go
new file mode 100644 (file)
index 0000000..96355f1
--- /dev/null
@@ -0,0 +1,129 @@
+package blockchain
+
+import (
+       "errors"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "github.com/golang/groupcache/lru"
+
+       "github.com/bytom/protocol/bc"
+)
+
+var (
+       maxCachedErrTxs = 1000
+
+       ErrTransactionNotExist = errors.New("transaction are not existed in the mempool")
+)
+
+type TxDesc struct {
+       Tx       *bc.Tx
+       Added    time.Time
+       Height   uint64
+       Fee      uint64
+       FeePerKB uint64
+}
+
+type TxPool struct {
+       lastUpdated int64
+       mtx         sync.RWMutex
+       pool        map[bc.Hash]*TxDesc
+       errCache    *lru.Cache
+}
+
+func NewTxPool() *TxPool {
+       return &TxPool{
+               lastUpdated: time.Now().Unix(),
+               pool:        make(map[bc.Hash]*TxDesc),
+               errCache:    lru.New(maxCachedErrTxs),
+       }
+}
+
+func (mp *TxPool) AddTransaction(tx *bc.Tx, height uint64, fee uint64) *TxDesc {
+       txD := &TxDesc{
+               Tx:       tx,
+               Added:    time.Now(),
+               Height:   height,
+               Fee:      fee,
+               FeePerKB: fee * 1000 / tx.TxHeader.SerializedSize,
+       }
+
+       mp.mtx.Lock()
+       defer mp.mtx.Unlock()
+
+       mp.pool[tx.ID] = txD
+       atomic.StoreInt64(&mp.lastUpdated, time.Now().Unix())
+       return txD
+}
+
+func (mp *TxPool) AddErrCache(txHash *bc.Hash) {
+       mp.mtx.Lock()
+       defer mp.mtx.Unlock()
+
+       mp.errCache.Add(txHash, nil)
+}
+
+func (mp *TxPool) removeTransaction(txHash *bc.Hash) {
+       mp.mtx.Lock()
+       defer mp.mtx.Unlock()
+
+       if _, ok := mp.pool[*txHash]; ok {
+               delete(mp.pool, *txHash)
+               atomic.StoreInt64(&mp.lastUpdated, time.Now().Unix())
+       }
+}
+
+func (mp *TxPool) GetTransaction(txHash *bc.Hash) (*TxDesc, error) {
+       mp.mtx.RLock()
+       defer mp.mtx.RUnlock()
+
+       if txD, ok := mp.pool[*txHash]; ok {
+               return txD, nil
+       }
+
+       return nil, ErrTransactionNotExist
+}
+
+func (mp *TxPool) GetTransactions() []*TxDesc {
+       mp.mtx.RLock()
+       defer mp.mtx.RUnlock()
+
+       txDs := make([]*TxDesc, len(mp.pool))
+       i := 0
+       for _, desc := range mp.pool {
+               txDs[i] = desc
+               i++
+       }
+       return txDs
+}
+
+func (mp *TxPool) IsTransactionInPool(txHash *bc.Hash) bool {
+       mp.mtx.RLock()
+       defer mp.mtx.RUnlock()
+
+       if _, ok := mp.pool[*txHash]; ok {
+               return true
+       }
+       return false
+}
+
+func (mp *TxPool) IsTransactionInErrCache(txHash *bc.Hash) bool {
+       mp.mtx.RLock()
+       defer mp.mtx.RUnlock()
+
+       _, ok := mp.errCache.Get(txHash)
+       return ok
+}
+
+func (mp *TxPool) HaveTransaction(txHash *bc.Hash) bool {
+       return mp.IsTransactionInPool(txHash) || mp.IsTransactionInErrCache(txHash)
+}
+
+func (mp *TxPool) Count() int {
+       mp.mtx.RLock()
+       defer mp.mtx.RUnlock()
+
+       count := len(mp.pool)
+       return count
+}
diff --git a/blockchain/mempool_test.go b/blockchain/mempool_test.go
new file mode 100644 (file)
index 0000000..9fbe3b1
--- /dev/null
@@ -0,0 +1,59 @@
+package blockchain
+
+import (
+       "testing"
+
+       "github.com/bytom/protocol/bc"
+       "github.com/bytom/protocol/bc/legacy"
+       "github.com/bytom/protocol/validation"
+)
+
+func TestTxPool(t *testing.T) {
+       p := NewTxPool()
+
+       txA := mockCoinbaseTx(1000, 6543)
+       txB := mockCoinbaseTx(2000, 2324)
+       txC := mockCoinbaseTx(3000, 9322)
+
+       p.AddTransaction(txA, 1000, 5000000000)
+       if !p.IsTransactionInPool(&txA.ID) {
+               t.Errorf("fail to find added txA in tx pool")
+       } else {
+               i, _ := p.GetTransaction(&txA.ID)
+               if i.Height != 1000 || i.Fee != 5000000000 || i.FeePerKB != 5000000000 {
+                       t.Errorf("incorrect data of TxDesc structure")
+               }
+       }
+
+       if p.IsTransactionInPool(&txB.ID) {
+               t.Errorf("shouldn't find txB in tx pool")
+       }
+       p.AddTransaction(txB, 1000, 5000000000)
+       if !p.IsTransactionInPool(&txB.ID) {
+               t.Errorf("shouldn find txB in tx pool")
+       }
+       if p.Count() != 2 {
+               t.Errorf("get wrong number of tx in the pool")
+       }
+       p.removeTransaction(&txB.ID)
+       if p.IsTransactionInPool(&txB.ID) {
+               t.Errorf("shouldn't find txB in tx pool")
+       }
+
+       p.AddErrCache(&txC.ID)
+       if !p.IsTransactionInErrCache(&txC.ID) {
+               t.Errorf("shouldn find txC in tx err cache")
+       }
+       if !p.HaveTransaction(&txC.ID) {
+               t.Errorf("shouldn find txC in tx err cache")
+       }
+}
+
+func mockCoinbaseTx(serializedSize uint64, amount uint64) *bc.Tx {
+       return legacy.MapTx(&legacy.TxData{
+               SerializedSize: serializedSize,
+               Outputs: []*legacy.TxOutput{
+                       legacy.NewTxOutput(*validation.BTMAssetID, amount, []byte{1}, nil),
+               },
+       })
+}
index bd1b7cc..4501583 100644 (file)
@@ -1,5 +1,6 @@
 package blockchain
 
+/*
 import (
        "math/rand"
        "testing"
@@ -135,3 +136,4 @@ func TestTimeout(t *testing.T) {
                }
        }
 }
+*/
index e9c30e1..e86e493 100644 (file)
@@ -27,6 +27,7 @@ import (
        "github.com/bytom/net/http/static"
        "github.com/bytom/generated/dashboard"
        "github.com/bytom/errors"
+       "github.com/bytom/blockchain/txbuilder"
 )
 
 const (
@@ -44,29 +45,18 @@ const (
        statusUpdateIntervalSeconds = 10
        // check if we should switch to consensus reactor
        switchToConsensusIntervalSeconds = 1
-       maxBlockchainResponseSize        = types.MaxBlockSize + 2
+       maxBlockchainResponseSize        = 22020096 + 2
        crosscoreRPCPrefix = "/rpc/"
 )
 
-/*
-type consensusReactor interface {
-       // for when we switch from blockchain reactor and fast sync to
-       // the consensus machine
-       SwitchToConsensus(*sm.State)
-}
-*/
-
 // BlockchainReactor handles long-term catchup syncing.
 type BlockchainReactor struct {
        p2p.BaseReactor
 
-//     state        *sm.State
-//     proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn
-//     store        *MemStore
        chain        *protocol.Chain
        store        *txdb.Store
        accounts         *account.Manager
-       assets           *asset.Registry
+       assets       *asset.Registry
        txFeeds          *txfeed.TxFeed
        indexer         *query.Indexer
        pool         *BlockPool
@@ -75,7 +65,7 @@ type BlockchainReactor struct {
        fastSync     bool
        requestsCh   chan BlockRequest
        timeoutsCh   chan string
-//     lastBlock    *types.Block
+       submitter    txbuilder.Submitter
 
        evsw types.EventSwitch
 }
@@ -165,6 +155,7 @@ func (bcr *BlockchainReactor) BuildHander() {
        m.Handle("/create-asset", jsonHandler(bcr.createAsset))
        m.Handle("/update-account-tags",jsonHandler(bcr.updateAccountTags))
        m.Handle("/update-asset-tags",jsonHandler(bcr.updateAssetTags))
+       m.Handle("/build-transaction", jsonHandler(bcr.build))
        m.Handle("/create-control-program",jsonHandler(bcr.createControlProgram))
        m.Handle("/create-account-receiver", jsonHandler(bcr.createAccountReceiver))
        m.Handle("/create-transaction-feed", jsonHandler(bcr.createTxFeed))
@@ -243,7 +234,7 @@ type page struct {
        LastPage bool         `json:"last_page"`
 }
 
-func NewBlockchainReactor(store *txdb.Store, chain *protocol.Chain, accounts *account.Manager, fastSync bool) *BlockchainReactor {
+func NewBlockchainReactor(store *txdb.Store, chain *protocol.Chain, accounts *account.Manager, assets *asset.Registry, fastSync bool) *BlockchainReactor {
     requestsCh    := make(chan BlockRequest, defaultChannelCapacity)
     timeoutsCh    := make(chan string, defaultChannelCapacity)
     pool := NewBlockPool(
@@ -255,6 +246,7 @@ func NewBlockchainReactor(store *txdb.Store, chain *protocol.Chain, accounts *ac
         chain:         chain,
         store:         store,
                accounts:      accounts,
+               assets:            assets,
         pool:          pool,
                mux:           http.NewServeMux(),
         fastSync:      fastSync,
@@ -423,10 +415,12 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
 }
 
 
+/*
 // SetEventSwitch implements events.Eventable
 func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) {
        bcR.evsw = evsw
 }
+*/
 
 //-----------------------------------------------------------------------------
 // Messages
diff --git a/blockchain/request.go b/blockchain/request.go
new file mode 100644 (file)
index 0000000..8165abc
--- /dev/null
@@ -0,0 +1,46 @@
+package blockchain
+
+import (
+       "context"
+
+       "github.com/bytom/encoding/json"
+       "github.com/bytom/errors"
+       "github.com/bytom/protocol/bc/legacy"
+)
+
+var (
+       errBadActionType = errors.New("bad action type")
+       errBadAlias      = errors.New("bad alias")
+       errBadAction     = errors.New("bad action object")
+)
+
+type buildRequest struct {
+       Tx      *legacy.TxData           `json:"base_transaction"`
+       Actions []map[string]interface{} `json:"actions"`
+       TTL     json.Duration            `json:"ttl"`
+}
+
+func (a *BlockchainReactor) filterAliases(ctx context.Context, br *buildRequest) error {
+       for i, m := range br.Actions {
+               id, _ := m["assset_id"].(string)
+               alias, _ := m["asset_alias"].(string)
+               if id == "" && alias != "" {
+                       asset, err := a.assets.FindByAlias(ctx, alias)
+                       if err != nil {
+                               return errors.WithDetailf(err, "invalid asset alias %s on action %d", alias, i)
+                       }
+                       m["asset_id"] = asset.AssetID
+               }
+
+               id, _ = m["account_id"].(string)
+               alias, _ = m["account_alias"].(string)
+               if id == "" && alias != "" {
+                       acc, err := a.accounts.FindByAlias(ctx, alias)
+                       if err != nil {
+                               return errors.WithDetailf(err, "invalid account alias %s on action %d", alias, i)
+                       }
+                       m["account_id"] = acc.ID
+               }
+       }
+       return nil
+}
index 94e2955..ae372b3 100644 (file)
@@ -125,7 +125,7 @@ func Create(ctx context.Context, db dbm.DB, typ string, xpubs []chainkd.XPub, qu
        }
     */
        var (
-               id       string
+               id       string = "alice"
                keyIndex uint64
        )
 
diff --git a/blockchain/transact.go b/blockchain/transact.go
new file mode 100644 (file)
index 0000000..ce86c84
--- /dev/null
@@ -0,0 +1,320 @@
+package blockchain
+
+import (
+       "context"
+       "encoding/json"
+       "sync"
+       "time"
+
+       //"github.com/bytom/blockchain/fetch"
+       "github.com/bytom/blockchain/txbuilder"
+       chainjson "github.com/bytom/encoding/json"
+       "github.com/bytom/errors"
+       //"github.com/bytom/log"
+       "github.com/bytom/net/http/httperror"
+       "github.com/bytom/net/http/reqid"
+       //"github.com/bytom/protocol/bc"
+       "github.com/bytom/protocol/bc/legacy"
+)
+
+var defaultTxTTL = 5 * time.Minute
+
+func (a *BlockchainReactor) actionDecoder(action string) (func([]byte) (txbuilder.Action, error), bool) {
+       var decoder func([]byte) (txbuilder.Action, error)
+       switch action {
+       case "control_account":
+               decoder = a.accounts.DecodeControlAction
+       case "control_program":
+               decoder = txbuilder.DecodeControlProgramAction
+       case "control_receiver":
+               decoder = txbuilder.DecodeControlReceiverAction
+       case "issue":
+               decoder = a.assets.DecodeIssueAction
+       case "retire":
+               decoder = txbuilder.DecodeRetireAction
+       case "spend_account":
+               decoder = a.accounts.DecodeSpendAction
+       case "spend_account_unspent_output":
+               decoder = a.accounts.DecodeSpendUTXOAction
+       case "set_transaction_reference_data":
+               decoder = txbuilder.DecodeSetTxRefDataAction
+       default:
+               return nil, false
+       }
+       return decoder, true
+}
+
+func (a *BlockchainReactor) buildSingle(ctx context.Context, req *buildRequest) (*txbuilder.Template, error) {
+       err := a.filterAliases(ctx, req)
+       if err != nil {
+               return nil, err
+       }
+       actions := make([]txbuilder.Action, 0, len(req.Actions))
+       for i, act := range req.Actions {
+               typ, ok := act["type"].(string)
+               if !ok {
+                       return nil, errors.WithDetailf(errBadActionType, "no action type provided on action %d", i)
+               }
+               decoder, ok := a.actionDecoder(typ)
+               if !ok {
+                       return nil, errors.WithDetailf(errBadActionType, "unknown action type %q on action %d", typ, i)
+               }
+
+               // Remarshal to JSON, the action may have been modified when we
+               // filtered aliases.
+               b, err := json.Marshal(act)
+               if err != nil {
+                       return nil, err
+               }
+               a, err := decoder(b)
+               if err != nil {
+                       return nil, errors.WithDetailf(errBadAction, "%s on action %d", err.Error(), i)
+               }
+               actions = append(actions, a)
+       }
+
+       ttl := req.TTL.Duration
+       if ttl == 0 {
+               ttl = defaultTxTTL
+       }
+       maxTime := time.Now().Add(ttl)
+       tpl, err := txbuilder.Build(ctx, req.Tx, actions, maxTime)
+       if errors.Root(err) == txbuilder.ErrAction {
+               // Format each of the inner errors contained in the data.
+               var formattedErrs []httperror.Response
+               for _, innerErr := range errors.Data(err)["actions"].([]error) {
+                       resp := errorFormatter.Format(innerErr)
+                       formattedErrs = append(formattedErrs, resp)
+               }
+               err = errors.WithData(err, "actions", formattedErrs)
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       // ensure null is never returned for signing instructions
+       if tpl.SigningInstructions == nil {
+               tpl.SigningInstructions = []*txbuilder.SigningInstruction{}
+       }
+       return tpl, nil
+}
+
+// POST /build-transaction
+func (a *BlockchainReactor) build(ctx context.Context, buildReqs []*buildRequest) (interface{}, error) {
+       responses := make([]interface{}, len(buildReqs))
+       var wg sync.WaitGroup
+       wg.Add(len(responses))
+
+       for i := 0; i < len(responses); i++ {
+               go func(i int) {
+                       subctx := reqid.NewSubContext(ctx, reqid.New())
+                       defer wg.Done()
+                       defer batchRecover(subctx, &responses[i])
+
+                       tmpl, err := a.buildSingle(subctx, buildReqs[i])
+                       if err != nil {
+                               responses[i] = err
+                       } else {
+                               responses[i] = tmpl
+                       }
+               }(i)
+       }
+
+       wg.Wait()
+       return responses, nil
+}
+
+func (a *BlockchainReactor) submitSingle(ctx context.Context, tpl *txbuilder.Template, waitUntil string) (interface{}, error) {
+       if tpl.Transaction == nil {
+               return nil, errors.Wrap(txbuilder.ErrMissingRawTx)
+       }
+
+       err := a.finalizeTxWait(ctx, tpl, waitUntil)
+       if err != nil {
+               return nil, errors.Wrapf(err, "tx %s", tpl.Transaction.ID.String())
+       }
+
+       return map[string]string{"id": tpl.Transaction.ID.String()}, nil
+}
+
+/*
+// recordSubmittedTx records a lower bound height at which the tx
+// was first submitted to the tx pool. If this request fails for
+// some reason, a retry will know to look for the transaction in
+// blocks starting at this height.
+//
+// If the tx has already been submitted, it returns the existing
+// height.
+func recordSubmittedTx(ctx context.Context, db pg.DB, txHash bc.Hash, currentHeight uint64) (uint64, error) {
+       const insertQ = `
+               INSERT INTO submitted_txs (tx_hash, height) VALUES($1, $2)
+               ON CONFLICT DO NOTHING
+       `
+       res, err := db.Exec(ctx, insertQ, txHash.Bytes(), currentHeight)
+       if err != nil {
+               return 0, err
+       }
+       inserted, err := res.RowsAffected()
+       if err != nil {
+               return 0, err
+       }
+       if inserted == 1 {
+               return currentHeight, nil
+       }
+
+       // The insert didn't affect any rows, meaning there was already an entry
+       // for this transaction hash.
+       const selectQ = `
+               SELECT height FROM submitted_txs WHERE tx_hash = $1
+       `
+       var height uint64
+       err = db.QueryRow(ctx, selectQ, txHash.Bytes()).Scan(&height)
+       return height, err
+}
+*/
+
+/*
+// cleanUpSubmittedTxs will periodically delete records of submitted txs
+// older than a day. This function blocks and only exits when its context
+// is cancelled.
+func cleanUpSubmittedTxs(ctx context.Context, db pg.DB) {
+       ticker := time.NewTicker(15 * time.Minute)
+       for {
+               select {
+               case <-ticker.C:
+                       // TODO(jackson): We could avoid expensive bulk deletes by partitioning
+                       // the table and DROP-ing tables of expired rows. Partitioning doesn't
+                       // play well with ON CONFLICT clauses though, so we would need to rework
+                       // how we guarantee uniqueness.
+                       const q = `DELETE FROM submitted_txs WHERE submitted_at < now() - interval '1 day'`
+                       _, err := db.Exec(ctx, q)
+                       if err != nil {
+                               log.Error(ctx, err)
+                       }
+               case <-ctx.Done():
+                       ticker.Stop()
+                       return
+               }
+       }
+}
+*/
+
+// finalizeTxWait calls FinalizeTx and then waits for confirmation of
+// the transaction.  A nil error return means the transaction is
+// confirmed on the blockchain.  ErrRejected means a conflicting tx is
+// on the blockchain.  context.DeadlineExceeded means ctx is an
+// expiring context that timed out.
+func (a *BlockchainReactor) finalizeTxWait(ctx context.Context, txTemplate *txbuilder.Template, waitUntil string) error {
+       // Use the current generator height as the lower bound of the block height
+       // that the transaction may appear in.
+       localHeight := a.chain.Height()
+       generatorHeight := localHeight
+
+       // Remember this height in case we retry this submit call.
+       /*height, err := recordSubmittedTx(ctx, a.db, txTemplate.Transaction.ID, generatorHeight)
+       if err != nil {
+               return errors.Wrap(err, "saving tx submitted height")
+       }*/
+
+       err := txbuilder.FinalizeTx(ctx, a.chain, a.submitter, txTemplate.Transaction)
+       if err != nil {
+               return err
+       }
+       if waitUntil == "none" {
+               return nil
+       }
+
+       _, err = a.waitForTxInBlock(ctx, txTemplate.Transaction, generatorHeight)
+       if err != nil {
+               return err
+       }
+       if waitUntil == "confirmed" {
+               return nil
+       }
+       /*
+
+               select {
+               case <-ctx.Done():
+                       return ctx.Err()
+               case <-a.pinStore.AllWaiter(height):
+               }
+       */
+
+       return nil
+}
+
+func (a *BlockchainReactor) waitForTxInBlock(ctx context.Context, tx *legacy.Tx, height uint64) (uint64, error) {
+       for {
+               height++
+               select {
+               case <-ctx.Done():
+                       return 0, ctx.Err()
+
+               case <-a.chain.BlockWaiter(height):
+                       b, err := a.chain.GetBlock(height)
+                       if err != nil {
+                               return 0, errors.Wrap(err, "getting block that just landed")
+                       }
+                       for _, confirmed := range b.Transactions {
+                               if confirmed.ID == tx.ID {
+                                       // confirmed
+                                       return height, nil
+                               }
+                       }
+
+                       if tx.MaxTime > 0 && tx.MaxTime < b.TimestampMS {
+                               return 0, errors.Wrap(txbuilder.ErrRejected, "transaction max time exceeded")
+                       }
+
+                       // might still be in pool or might be rejected; we can't
+                       // tell definitively until its max time elapses.
+
+                       // Re-insert into the pool in case it was dropped.
+                       err = txbuilder.FinalizeTx(ctx, a.chain, a.submitter, tx)
+                       if err != nil {
+                               return 0, err
+                       }
+
+                       // TODO(jackson): Do simple rejection checks like checking if
+                       // the tx's blockchain prevouts still exist in the state tree.
+               }
+       }
+}
+
+type submitArg struct {
+       Transactions []txbuilder.Template
+       wait         chainjson.Duration
+       WaitUntil    string `json:"wait_until"` // values none, confirmed, processed. default: processed
+}
+
+// POST /submit-transaction
+func (a *BlockchainReactor) submit(ctx context.Context, x submitArg) (interface{}, error) {
+       // Setup a timeout for the provided wait duration.
+       timeout := x.wait.Duration
+       if timeout <= 0 {
+               timeout = 30 * time.Second
+       }
+       ctx, cancel := context.WithTimeout(ctx, timeout)
+       defer cancel()
+
+       responses := make([]interface{}, len(x.Transactions))
+       var wg sync.WaitGroup
+       wg.Add(len(responses))
+       for i := range responses {
+               go func(i int) {
+                       subctx := reqid.NewSubContext(ctx, reqid.New())
+                       defer wg.Done()
+                       defer batchRecover(subctx, &responses[i])
+
+                       tx, err := a.submitSingle(subctx, &x.Transactions[i], x.WaitUntil)
+                       if err != nil {
+                               responses[i] = err
+                       } else {
+                               responses[i] = tx
+                       }
+               }(i)
+       }
+
+       wg.Wait()
+       return responses, nil
+}
index 04d791d..d85eb06 100644 (file)
@@ -3,5 +3,5 @@ go build
 
 ---------------Run:
 For two node test。
-step1:  ./test.sh node1
-step2: ./test.sh node2
+step1:  ./test.sh bytom0
+step2: ./test.sh bytom1
index 5ca80da..767c877 100644 (file)
@@ -9,7 +9,7 @@ import (
        cmn "github.com/tendermint/tmlibs/common"
 )
 
-var initFilesCmd = &cobra.Command{
+var initFilesCmd = &cobra.Command {
        Use:   "init",
        Short: "Initialize blockchain",
        Run:   initFiles,
@@ -20,28 +20,15 @@ func init() {
 }
 
 func initFiles(cmd *cobra.Command, args []string) {
-       privValFile := config.PrivValidatorFile()
-       if _, err := os.Stat(privValFile); os.IsNotExist(err) {
-               privValidator := types.GenPrivValidator()
-               privValidator.SetFile(privValFile)
-               privValidator.Save()
-
-               genFile := config.GenesisFile()
-
-               if _, err := os.Stat(genFile); os.IsNotExist(err) {
-                       genDoc := types.GenesisDoc{
-                               ChainID: cmn.Fmt("chain0"),
-                       }
-                       genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
-                               PubKey: privValidator.PubKey,
-                               Amount: 10,
-                       }}
-
-                       genDoc.SaveAs(genFile)
-               }
+       genFile := config.GenesisFile()
 
-               logger.Info("Initialized tendermint", "genesis", config.GenesisFile(), "priv_validator", config.PrivValidatorFile())
-       } else {
-               logger.Info("Already initialized", "priv_validator", config.PrivValidatorFile())
+       if _, err := os.Stat(genFile); os.IsNotExist(err) {
+               genDoc := types.GenesisDoc{
+                       ChainID: cmn.Fmt("bytom"),
+                       PrivateKey: "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8",
+               }
+               genDoc.SaveAs(genFile)
        }
+
+       logger.Info("Initialized bytom", "genesis", config.GenesisFile())
 }
index e8a288c..0f1b1b6 100644 (file)
@@ -3,7 +3,6 @@ package commands
 import (
        "fmt"
        "io/ioutil"
-       "time"
 
        "github.com/spf13/cobra"
 
@@ -29,32 +28,24 @@ func init() {
 }
 
 func runNode(cmd *cobra.Command, args []string) error {
-
-       // Wait until the genesis doc becomes available
-       // This is for Mintnet compatibility.
-       // TODO: If Mintnet gets deprecated or genesis_file is
-       // always available, remove.
        genDocFile := config.GenesisFile()
-       if !cmn.FileExists(genDocFile) {
-               logger.Info(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
-               for {
-                       time.Sleep(time.Second)
-                       if !cmn.FileExists(genDocFile) {
-                               continue
-                       }
-                       jsonBlob, err := ioutil.ReadFile(genDocFile)
-                       if err != nil {
-                               return fmt.Errorf("Couldn't read GenesisDoc file: %v", err)
-                       }
-                       genDoc, err := types.GenesisDocFromJSON(jsonBlob)
-                       if err != nil {
-                               return fmt.Errorf("Error reading GenesisDoc: %v", err)
-                       }
-                       if genDoc.ChainID == "" {
-                               return fmt.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
-                       }
-                       config.ChainID = genDoc.ChainID
+       if cmn.FileExists(genDocFile) {
+               jsonBlob, err := ioutil.ReadFile(genDocFile)
+               if err != nil {
+                       return fmt.Errorf("Couldn't read GenesisDoc file: %v", err)
+               }
+               genDoc, err := types.GenesisDocFromJSON(jsonBlob)
+               if err != nil {
+                       return fmt.Errorf("Error reading GenesisDoc: %v", err)
                }
+               if genDoc.ChainID == "" {
+                       return fmt.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
+               }
+               config.ChainID = genDoc.ChainID
+               config.PrivateKey = genDoc.PrivateKey
+               config.Time = genDoc.GenesisTime
+       } else {
+               return fmt.Errorf("not find genesis.json")
        }
 
        // Create & start node
index aa15795..1a53823 100755 (executable)
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-if [ "$1" = "node1" ];
+if [ "$1" = "bytom0" ];
 then
-    ./bytom node --home ./test/.blockchain
-elif [ "$1" = "node2" ];
+    ./bytom node --home ./test/.bytom0
+elif [ "$1" = "bytom1" ];
 then
-    ./bytom node --home ./test/.blockchain1
+    ./bytom node --home ./test/.bytom1
 else
-    echo "please cin -----./test.sh node1[node2]------ ."
+    echo "please cin -----./test.sh bytom0[bytom1]------ ."
 fi
diff --git a/cmd/bytom/test/.blockchain/genesis.json b/cmd/bytom/test/.blockchain/genesis.json
deleted file mode 100644 (file)
index 51052f2..0000000
+++ /dev/null
@@ -1 +0,0 @@
-{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"chain0","validators":[{"pub_key":{"type":"ed25519","data":"72C3013E7C237C3A45865B05B1B9F9C8CA8E00186777862E59CECCF1771DDE70"},"amount":10,"name":""}],"app_hash":""}
\ No newline at end of file
diff --git a/cmd/bytom/test/.blockchain/priv_validator.json b/cmd/bytom/test/.blockchain/priv_validator.json
deleted file mode 100644 (file)
index 56e28d2..0000000
+++ /dev/null
@@ -1 +0,0 @@
-{"address":"6F1C63636D9F0247AE829415735AC04B6AFE8374","pub_key":{"type":"ed25519","data":"72C3013E7C237C3A45865B05B1B9F9C8CA8E00186777862E59CECCF1771DDE70"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"630BEB082A08D0EF9E37FE02E0A17F46E2A63E4B0FA0EF9089B811D211E5140572C3013E7C237C3A45865B05B1B9F9C8CA8E00186777862E59CECCF1771DDE70"}}
\ No newline at end of file
diff --git a/cmd/bytom/test/.blockchain1/genesis.json b/cmd/bytom/test/.blockchain1/genesis.json
deleted file mode 100644 (file)
index b5a11df..0000000
+++ /dev/null
@@ -1 +0,0 @@
-{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"chain0","validators":[{"pub_key":{"type":"ed25519","data":"A4CEE75748FFFB2DE321CC07D813E5421BD148B4852042C3AC4F1FB5AE0B2BBF"},"amount":10,"name":""}],"app_hash":""}
\ No newline at end of file
diff --git a/cmd/bytom/test/.blockchain1/priv_validator.json b/cmd/bytom/test/.blockchain1/priv_validator.json
deleted file mode 100644 (file)
index b0036d4..0000000
+++ /dev/null
@@ -1 +0,0 @@
-{"address":"55FA5FFF009D41AEBB6C08A76E628ED55F1A5BA6","pub_key":{"type":"ed25519","data":"A4CEE75748FFFB2DE321CC07D813E5421BD148B4852042C3AC4F1FB5AE0B2BBF"},"last_height":0,"last_round":0,"last_step":0,"last_signature":null,"priv_key":{"type":"ed25519","data":"35A8F7DA011682AF9EF58EEB33F4B5F3208A08624CED870B60B9C36CDC82E71FA4CEE75748FFFB2DE321CC07D813E5421BD148B4852042C3AC4F1FB5AE0B2BBF"}}
\ No newline at end of file
similarity index 88%
rename from cmd/bytom/test/.blockchain/config.toml
rename to cmd/bytom/test/.bytom0/config.toml
index cbaa9fa..2c5cfb9 100644 (file)
@@ -1,11 +1,11 @@
 # This is a TOML config file.
 # For more information, see https://github.com/toml-lang/toml
 
-proxy_app = "tcp://127.0.0.1:46658"
 moniker = "anonymous"
 fast_sync = true
 db_backend = "leveldb"
 log_level = "state:info,*:info"
+api_addr = "0.0.0.0:1999"
 
 [rpc]
 laddr = "tcp://0.0.0.0:46657"
diff --git a/cmd/bytom/test/.bytom0/genesis.json b/cmd/bytom/test/.bytom0/genesis.json
new file mode 100644 (file)
index 0000000..48617ac
--- /dev/null
@@ -0,0 +1 @@
+{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"bytom","private_key":"27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8","app_hash":""}
\ No newline at end of file
similarity index 88%
rename from cmd/bytom/test/.blockchain1/config.toml
rename to cmd/bytom/test/.bytom1/config.toml
index 0d41fa7..9ea5425 100644 (file)
@@ -1,11 +1,11 @@
 # This is a TOML config file.
 # For more information, see https://github.com/toml-lang/toml
 
-proxy_app = "tcp://127.0.0.1:46658"
 moniker = "anonymous"
 fast_sync = true
 db_backend = "leveldb"
 log_level = "state:info,*:info"
+api_addr = "0.0.0.0:2000"
 
 [rpc]
 laddr = "tcp://0.0.0.0:46658"
diff --git a/cmd/bytom/test/.bytom1/genesis.json b/cmd/bytom/test/.bytom1/genesis.json
new file mode 100644 (file)
index 0000000..48617ac
--- /dev/null
@@ -0,0 +1 @@
+{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"bytom","private_key":"27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8","app_hash":""}
\ No newline at end of file
diff --git a/cmd/bytomcli/example/issue.go b/cmd/bytomcli/example/issue.go
new file mode 100644 (file)
index 0000000..0b00e95
--- /dev/null
@@ -0,0 +1,76 @@
+package example
+
+import (
+       //"bytes"
+       "context"
+//     "flag"
+       "fmt"
+       //"io"
+       //"net"
+       //"net/http"
+       //"os"
+       //"path/filepath"
+       //"strings"
+       //"time"
+       //stdjson "encoding/json"
+
+       //"github.com/bytom/blockchain"
+       "github.com/bytom/blockchain/rpc"
+       //"github.com/bytom/crypto/ed25519"
+       //"github.com/bytom/env"
+       //"github.com/bytom/errors"
+       //"github.com/bytom/log"
+       "github.com/bytom/crypto/ed25519/chainkd"
+)
+
+// TO DO: issue a asset to a account.
+func IssueTest(client *rpc.Client, args []string) {
+       // Create Account.
+       fmt.Printf("To create Account:\n")
+       xprv, _ := chainkd.NewXPrv(nil)
+       xpub := xprv.XPub()
+       fmt.Printf("xprv_account:%v\n", xprv)
+       fmt.Printf("xpub_account:%v\n", xpub)
+       type Ins struct {
+           RootXPubs []chainkd.XPub `json:"root_xpubs"`
+               Quorum    int
+               Alias     string
+               Tags      map[string]interface{}
+               ClientToken string `json:"client_token"`
+       }
+       var ins Ins
+       ins.RootXPubs = []chainkd.XPub{xpub}
+       ins.Quorum = 1
+       ins.Alias = "alice"
+       ins.Tags = map[string]interface{}{"test_tag": "v0",}
+       ins.ClientToken = "account"
+       account := make([]interface{}, 50)
+       client.Call(context.Background(), "/create-account", &[]Ins{ins,}, &account)
+       fmt.Printf("account:%v\n", account)
+
+
+       // Create Asset.
+       fmt.Printf("To create Asset:\n")
+       xprv_asset, _ := chainkd.NewXPrv(nil)
+       xpub_asset := xprv_asset.XPub()
+       fmt.Printf("xprv_asset:%v\n", xprv_asset)
+       fmt.Printf("xpub_asset:%v\n", xpub_asset)
+       type Ins_asset struct {
+           RootXPubs []chainkd.XPub `json:"root_xpubs"`
+               Quorum    int
+               Alias     string
+               Tags      map[string]interface{}
+               Definition  map[string]interface{}
+               ClientToken string `json:"client_token"`
+       }
+       var ins_asset Ins_asset
+       ins_asset.RootXPubs = []chainkd.XPub{xpub_asset}
+       ins_asset.Quorum = 1
+       ins_asset.Alias = "gold"
+       ins_asset.Tags = map[string]interface{}{"test_tag": "v0",}
+       ins_asset.Definition = map[string]interface{}{"test_definition": "v0"}
+       ins_asset.ClientToken = "asset"
+       asset := make([]interface{}, 50)
+       client.Call(context.Background(), "/create-asset", &[]Ins_asset{ins_asset,}, &asset)
+       fmt.Printf("asset:%v\n", asset)
+}
diff --git a/cmd/bytomcli/issue.py b/cmd/bytomcli/issue.py
new file mode 100644 (file)
index 0000000..1b34fc7
--- /dev/null
@@ -0,0 +1,9 @@
+import os
+
+account = os.popen('./bytomcli create-account fdafa')
+
+print account.read()
+
+asset = os.popen('./bytomcli create-asset  fafdafd')
+
+print asset.read()
index a31ee79..12ce0db 100644 (file)
@@ -1,10 +1,8 @@
-// Command corectl provides miscellaneous control functions for a Chain Core.
 package main
 
 import (
        "bytes"
        "context"
-//     "encoding/hex"
        "flag"
        "fmt"
        "io"
@@ -12,7 +10,6 @@ import (
        "net/http"
        "os"
        "path/filepath"
-       //"strconv"
        "strings"
        "time"
        stdjson "encoding/json"
@@ -25,10 +22,9 @@ import (
        "github.com/bytom/crypto/ed25519"
        "github.com/bytom/env"
        "github.com/bytom/errors"
-//     "github.com/bytom/generated/rev"
        "github.com/bytom/log"
        "github.com/bytom/crypto/ed25519/chainkd"
-       //"github.com/bytom/protocol/bc"
+       "github.com/bytom/cmd/bytomcli/example"
 )
 
 // config vars
@@ -57,10 +53,7 @@ type grantReq struct {
 }
 
 var commands = map[string]*command{
-       //"config-generator":     {configGenerator},
        "create-block-keypair": {createBlockKeyPair},
-       //"create-token":         {createToken},
-       //"config":               {configNongenerator},
        "reset":                {reset},
        "grant":                {grant},
        "revoke":               {revoke},
@@ -69,6 +62,7 @@ var commands = map[string]*command{
        "update-account-tags":  {updateAccountTags},
        "create-asset":         {createAsset},
        "update-asset-tags":    {updateAssetTags},
+       "build-transaction": {buildTransaction},
        "create-control-program": {createControlProgram},
        "create-account-receiver": {createAccountReceiver},
        "create-transaction-feed": {createTxFeed},
@@ -80,6 +74,8 @@ var commands = map[string]*command{
         "list-transactions":       {listTransactions},
         "list-balances":           {listBalances},
         "list-unspent-outputs":    {listUnspentOutputs},
+       "delete-transaction-feed": {deleteTxFeed},
+       "issue-test": {example.IssueTest},
 }
 
 func main() {
@@ -114,97 +110,6 @@ func main() {
        cmd.f(mustRPCClient(), os.Args[2:])
 }
 
-/*
-func configGenerator(client *rpc.Client, args []string) {
-       const usage = "usage: corectl config-generator [flags] [quorum] [pubkey url]..."
-       var (
-               quorum  uint32
-               signers []*config.BlockSigner
-               err     error
-       )
-
-       var flags flag.FlagSet
-       maxIssuanceWindow := flags.Duration("w", 24*time.Hour, "the maximum issuance window `duration` for this generator")
-       flagK := flags.String("k", "", "local `pubkey` for signing blocks")
-       flagHSMURL := flags.String("hsm-url", "", "hsm `url` for signing blocks (mockhsm if empty)")
-       flagHSMToken := flags.String("hsm-token", "", "hsm `access-token` for connecting to hsm")
-
-       flags.Usage = func() {
-               fmt.Println(usage)
-               flags.PrintDefaults()
-               os.Exit(1)
-       }
-       flags.Parse(args)
-       args = flags.Args()
-
-       // not a blocksigner
-       if *flagK == "" && *flagHSMURL != "" {
-               fatalln("error: flag -hsm-url has no effect without -k")
-       }
-
-       // TODO(ameets): update when switching to x.509 authorization
-       if (*flagHSMURL == "") != (*flagHSMToken == "") {
-               fatalln("error: flags -hsm-url and -hsm-token must be given together")
-       }
-
-       if len(args) == 0 {
-               if *flagK != "" {
-                       quorum = 1
-               }
-       } else if len(args)%2 != 1 {
-               fatalln(usage)
-       } else {
-               q64, err := strconv.ParseUint(args[0], 10, 32)
-               if err != nil {
-                       fatalln(usage)
-               }
-               quorum = uint32(q64)
-
-               for i := 1; i < len(args); i += 2 {
-                       pubkey, err := hex.DecodeString(args[i])
-                       if err != nil {
-                               fatalln(usage)
-                       }
-                       if len(pubkey) != ed25519.PublicKeySize {
-                               fatalln("error:", "bad ed25519 public key length")
-                       }
-                       url := args[i+1]
-                       signers = append(signers, &config.BlockSigner{
-                               Pubkey: pubkey,
-                               Url:    url,
-                       })
-               }
-       }
-
-       var blockPub []byte
-       if *flagK != "" {
-               blockPub, err = hex.DecodeString(*flagK)
-               if err != nil {
-                       fatalln("error: unable to decode block pub")
-               }
-       }
-
-       conf := &config.Config{
-               IsGenerator:         true,
-               Quorum:              quorum,
-               Signers:             signers,
-               MaxIssuanceWindowMs: bc.DurationMillis(*maxIssuanceWindow),
-               IsSigner:            *flagK != "",
-               BlockPub:            blockPub,
-               BlockHsmUrl:         *flagHSMURL,
-               BlockHsmAccessToken: *flagHSMToken,
-       }
-
-       err = client.Call(context.Background(), "/configure", conf, nil)
-       dieOnRPCError(err)
-
-       wait(client, nil)
-       var r map[string]interface{}
-       err = client.Call(context.Background(), "/info", nil, &r)
-       dieOnRPCError(err)
-       fmt.Println(r["blockchain_id"])
-}
-*/
 
 func createBlockKeyPair(client *rpc.Client, args []string) {
        if len(args) != 0 {
@@ -218,108 +123,6 @@ func createBlockKeyPair(client *rpc.Client, args []string) {
        fmt.Printf("%x\n", pub.Pub)
 }
 
-/*
-func createToken(client *rpc.Client, args []string) {
-       const usage = "usage: corectl create-token [-net] [name] [policy]"
-       var flags flag.FlagSet
-       flagNet := flags.Bool("net", false, "DEPRECATED. create a network token instead of client")
-       flags.Usage = func() {
-               fmt.Println(usage)
-               flags.PrintDefaults()
-               os.Exit(1)
-       }
-       flags.Parse(args)
-       args = flags.Args()
-       if len(args) == 2 && *flagNet || len(args) < 1 || len(args) > 2 {
-               fatalln(usage)
-       }
-
-       req := struct{ ID string }{args[0]}
-       var tok accesstoken.Token
-       // TODO(kr): find a way to make this atomic with the grant below
-       err := client.Call(context.Background(), "/create-access-token", req, &tok)
-       dieOnRPCError(err)
-       fmt.Println(tok.Token)
-
-       grant := grantReq{
-               GuardType: "access_token",
-               GuardData: map[string]string{"id": tok.ID},
-       }
-       switch {
-       case len(args) == 2:
-               grant.Policy = args[1]
-       case *flagNet:
-               grant.Policy = "crosscore"
-               fmt.Fprintln(os.Stderr, "warning: the network flag is deprecated")
-       default:
-               grant.Policy = "client-readwrite"
-               fmt.Fprintln(os.Stderr, "warning: implicit policy name is deprecated")
-       }
-       err = client.Call(context.Background(), "/create-authorization-grant", grant, nil)
-       dieOnRPCError(err, "Auth grant error:")
-}
-*/
-
-/*
-func configNongenerator(client *rpc.Client, args []string) {
-       const usage = "usage: corectl config [flags] [blockchain-id] [generator-url]"
-       var flags flag.FlagSet
-       flagT := flags.String("t", "", "generator access `token`")
-       flagK := flags.String("k", "", "local `pubkey` for signing blocks")
-       flagHSMURL := flags.String("hsm-url", "", "hsm `url` for signing blocks (mockhsm if empty)")
-       flagHSMToken := flags.String("hsm-token", "", "hsm `acc
-ess-token` for connecting to hsm")
-
-       flags.Usage = func() {
-               fmt.Println(usage)
-               flags.PrintDefaults()
-               os.Exit(1)
-       }
-       flags.Parse(args)
-       args = flags.Args()
-       if len(args) < 2 {
-               fatalln(usage)
-       }
-
-       // not a blocksigner
-       if *flagK == "" && *flagHSMURL != "" {
-               fatalln("error: flag -hsm-url has no effect without -k")
-       }
-
-       // TODO(ameets): update when switching to x.509 authorization
-       if (*flagHSMURL == "") != (*flagHSMToken == "") {
-               fatalln("error: flags -hsm-url and -hsm-token must be given together")
-       }
-
-       var blockchainID bc.Hash
-       err := blockchainID.UnmarshalText([]byte(args[0]))
-       if err != nil {
-               fatalln("error: invalid blockchain ID:", err)
-       }
-
-       var blockPub []byte
-       if *flagK != "" {
-               blockPub, err = hex.DecodeString(*flagK)
-               if err != nil {
-                       fatalln("error: unable to decode block pub")
-               }
-       }
-
-       var conf config.Config
-       conf.BlockchainId = &blockchainID
-       conf.GeneratorUrl = args[1]
-       conf.GeneratorAccessToken = *flagT
-       conf.IsSigner = *flagK != ""
-       conf.BlockPub = blockPub
-       conf.BlockHsmUrl = *flagHSMURL
-       conf.BlockHsmAccessToken = *flagHSMToken
-
-       client.BlockchainID = blockchainID.String()
-       err = client.Call(context.Background(), "/configure", conf, nil)
-       dieOnRPCError(err)
-}
-*/
-
 // reset will attempt a reset rpc call on a remote core. If the
 // core is not configured with reset capabilities an error is returned.
 func reset(client *rpc.Client, args []string) {
@@ -517,7 +320,7 @@ func createAccount(client *rpc.Client, args []string) {
        var ins Ins
        ins.RootXPubs = []chainkd.XPub{xpub}
        ins.Quorum = 1
-       ins.Alias = "aa"
+       ins.Alias = "alice"
        ins.Tags = map[string]interface{}{"test_tag": "v0",}
        ins.ClientToken = args[0]
        responses := make([]interface{}, 50)
@@ -548,7 +351,7 @@ func createAsset(client *rpc.Client, args []string) {
        var ins Ins
        ins.RootXPubs = []chainkd.XPub{xpub}
        ins.Quorum = 1
-       ins.Alias = "aa"
+       ins.Alias = "bob"
        ins.Tags = map[string]interface{}{"test_tag": "v0",}
        ins.Definition = map[string]interface{}{}
        ins.ClientToken = args[0]
@@ -573,29 +376,35 @@ func updateAccountTags(client *rpc.Client,args []string){
        ins.ID = &aa
        ins.Alias = &alias
        ins.Tags = map[string]interface{}{"test_tag": "v0",}
-        responses := make([]interface{}, 50)
-        client.Call(context.Background(), "/update-account-tags", &[]Ins{ins,}, &responses)
-        fmt.Printf("responses:%v\n", responses)
+       responses := make([]interface{}, 50)
+       client.Call(context.Background(), "/update-account-tags", &[]Ins{ins,}, &responses)
+       fmt.Printf("responses:%v\n", responses)
 }
 
 func updateAssetTags(client *rpc.Client, args []string){
-        if len(args) != 0{
-                fatalln("error:updateAccountTags not use args")
-        }
+       if len(args) != 0{
+                       fatalln("error:updateAccountTags not use args")
+       }
        type Ins struct {
        ID    *string
        Alias *string
        Tags  map[string]interface{} `json:"tags"`
-}
+       }
        var ins Ins
        id := "123456"
        alias := "asdfg"
        ins.ID = &id
        ins.Alias = &alias
-        ins.Tags = map[string]interface{}{"test_tag": "v0",}
-        responses := make([]interface{}, 50)
-        client.Call(context.Background(), "/update-asset-tags", &[]Ins{ins,}, &responses)
-        fmt.Printf("responses:%v\n", responses)
+       ins.Tags = map[string]interface{}{"test_tag": "v0",}
+       responses := make([]interface{}, 50)
+       client.Call(context.Background(), "/update-asset-tags", &[]Ins{ins,}, &responses)
+       fmt.Printf("responses:%v\n", responses)
+}
+
+func buildTransaction(client *rpc.Client, args []string) {
+       if len(args) != 0 {
+               fatalln("error:updateAccountTags not use args")
+       }
 }
 
 func createControlProgram(client *rpc.Client, args []string){
index f75695d..351e778 100644 (file)
@@ -5,7 +5,7 @@ import (
        "path/filepath"
        "time"
 
-       "github.com/bytom/types"
+       //"github.com/bytom/types"
 )
 
 type Config struct {
@@ -15,8 +15,6 @@ type Config struct {
        // Options for services
        RPC       *RPCConfig       `mapstructure:"rpc"`
        P2P       *P2PConfig       `mapstructure:"p2p"`
-       Mempool   *MempoolConfig   `mapstructure:"mempool"`
-       Consensus *ConsensusConfig `mapstructure:"consensus"`
 }
 
 func DefaultConfig() *Config {
@@ -24,8 +22,6 @@ func DefaultConfig() *Config {
                BaseConfig: DefaultBaseConfig(),
                RPC:        DefaultRPCConfig(),
                P2P:        DefaultP2PConfig(),
-               Mempool:    DefaultMempoolConfig(),
-               Consensus:  DefaultConsensusConfig(),
        }
 }
 
@@ -34,8 +30,6 @@ func TestConfig() *Config {
                BaseConfig: TestBaseConfig(),
                RPC:        TestRPCConfig(),
                P2P:        TestP2PConfig(),
-               Mempool:    DefaultMempoolConfig(),
-               Consensus:  TestConsensusConfig(),
        }
 }
 
@@ -44,15 +38,12 @@ func (cfg *Config) SetRoot(root string) *Config {
        cfg.BaseConfig.RootDir = root
        cfg.RPC.RootDir = root
        cfg.P2P.RootDir = root
-       cfg.Mempool.RootDir = root
-       cfg.Consensus.RootDir = root
        return cfg
 }
 
 //-----------------------------------------------------------------------------
 // BaseConfig
 
-// BaseConfig struct for a Tendermint node
 type BaseConfig struct {
        // The root directory for all data.
        // This should be set in viper so it can unmarshal into this struct
@@ -65,18 +56,11 @@ type BaseConfig struct {
        Genesis string `mapstructure:"genesis_file"`
 
        // A JSON file containing the private key to use as a validator in the consensus protocol
-       PrivValidator string `mapstructure:"priv_validator_file"`
+       PrivateKey string `mapstructure:"private_key"`
 
        // A custom human readable name for this node
        Moniker string `mapstructure:"moniker"`
 
-       // TCP or UNIX socket address of the ABCI application,
-       // or the name of an ABCI application compiled in with the Tendermint binary
-       ProxyApp string `mapstructure:"proxy_app"`
-
-       // Mechanism to connect to the ABCI application: socket | grpc
-       ABCI string `mapstructure:"abci"`
-
        // Output level for logging
        LogLevel string `mapstructure:"log_level"`
 
@@ -88,8 +72,6 @@ type BaseConfig struct {
        // and verifying their commits
        FastSync bool `mapstructure:"fast_sync"`
 
-       // If true, query the ABCI app on connecting to a new peer
-       // so the app can decide if we should keep the connection or not
        FilterPeers bool `mapstructure:"filter_peers"` // false
 
        // What indexer to use for transactions
@@ -100,15 +82,16 @@ type BaseConfig struct {
 
        // Database directory
        DBPath string `mapstructure:"db_dir"`
+
+       ApiAddress string `mapstructure:"api_addr"`
+
+       Time time.Time
 }
 
 func DefaultBaseConfig() BaseConfig {
        return BaseConfig{
                Genesis:           "genesis.json",
-               PrivValidator:     "priv_validator.json",
                Moniker:           "anonymous",
-               ProxyApp:          "tcp://127.0.0.1:46658",
-               ABCI:              "socket",
                LogLevel:          DefaultPackageLogLevels(),
                ProfListenAddress: "",
                FastSync:          true,
@@ -121,8 +104,7 @@ func DefaultBaseConfig() BaseConfig {
 
 func TestBaseConfig() BaseConfig {
        conf := DefaultBaseConfig()
-       conf.ChainID = "tendermint_test"
-       conf.ProxyApp = "dummy"
+       conf.ChainID = "bytom_test"
        conf.FastSync = false
        conf.DBBackend = "memdb"
        return conf
@@ -132,10 +114,6 @@ func (b BaseConfig) GenesisFile() string {
        return rootify(b.Genesis, b.RootDir)
 }
 
-func (b BaseConfig) PrivValidatorFile() string {
-       return rootify(b.PrivValidator, b.RootDir)
-}
-
 func (b BaseConfig) DBDir() string {
        return rootify(b.DBPath, b.RootDir)
 }
@@ -216,124 +194,6 @@ func (p *P2PConfig) AddrBookFile() string {
 }
 
 //-----------------------------------------------------------------------------
-// MempoolConfig
-
-type MempoolConfig struct {
-       RootDir      string `mapstructure:"home"`
-       Recheck      bool   `mapstructure:"recheck"`
-       RecheckEmpty bool   `mapstructure:"recheck_empty"`
-       Broadcast    bool   `mapstructure:"broadcast"`
-       WalPath      string `mapstructure:"wal_dir"`
-}
-
-func DefaultMempoolConfig() *MempoolConfig {
-       return &MempoolConfig{
-               Recheck:      true,
-               RecheckEmpty: true,
-               Broadcast:    true,
-               WalPath:      "data/mempool.wal",
-       }
-}
-
-func (m *MempoolConfig) WalDir() string {
-       return rootify(m.WalPath, m.RootDir)
-}
-
-//-----------------------------------------------------------------------------
-// ConsensusConfig
-
-// ConsensusConfig holds timeouts and details about the WAL, the block structure,
-// and timeouts in the consensus protocol.
-type ConsensusConfig struct {
-       RootDir  string `mapstructure:"home"`
-       WalPath  string `mapstructure:"wal_file"`
-       WalLight bool   `mapstructure:"wal_light"`
-       walFile  string // overrides WalPath if set
-
-       // All timeouts are in ms
-       TimeoutPropose        int `mapstructure:"timeout_propose"`
-       TimeoutProposeDelta   int `mapstructure:"timeout_propose_delta"`
-       TimeoutPrevote        int `mapstructure:"timeout_prevote"`
-       TimeoutPrevoteDelta   int `mapstructure:"timeout_prevote_delta"`
-       TimeoutPrecommit      int `mapstructure:"timeout_precommit"`
-       TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"`
-       TimeoutCommit         int `mapstructure:"timeout_commit"`
-
-       // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
-       SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
-
-       // BlockSize
-       MaxBlockSizeTxs   int `mapstructure:"max_block_size_txs"`
-       MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"`
-
-       // TODO: This probably shouldn't be exposed but it makes it
-       // easy to write tests for the wal/replay
-       BlockPartSize int `mapstructure:"block_part_size"`
-}
-
-// Wait this long for a proposal
-func (cfg *ConsensusConfig) Propose(round int) time.Duration {
-       return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
-}
-
-// After receiving any +2/3 prevote, wait this long for stragglers
-func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
-       return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
-}
-
-// After receiving any +2/3 precommits, wait this long for stragglers
-func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
-       return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
-}
-
-// After receiving +2/3 precommits for a single block (a commit), wait this long for stragglers in the next height's RoundStepNewHeight
-func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
-       return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
-}
-
-func DefaultConsensusConfig() *ConsensusConfig {
-       return &ConsensusConfig{
-               WalPath:               "data/cs.wal/wal",
-               WalLight:              false,
-               TimeoutPropose:        3000,
-               TimeoutProposeDelta:   500,
-               TimeoutPrevote:        1000,
-               TimeoutPrevoteDelta:   500,
-               TimeoutPrecommit:      1000,
-               TimeoutPrecommitDelta: 500,
-               TimeoutCommit:         1000,
-               SkipTimeoutCommit:     false,
-               MaxBlockSizeTxs:       10000,
-               MaxBlockSizeBytes:     1,                          // TODO
-               BlockPartSize:         types.DefaultBlockPartSize, // TODO: we shouldnt be importing types
-       }
-}
-
-func TestConsensusConfig() *ConsensusConfig {
-       config := DefaultConsensusConfig()
-       config.TimeoutPropose = 2000
-       config.TimeoutProposeDelta = 1
-       config.TimeoutPrevote = 10
-       config.TimeoutPrevoteDelta = 1
-       config.TimeoutPrecommit = 10
-       config.TimeoutPrecommitDelta = 1
-       config.TimeoutCommit = 10
-       config.SkipTimeoutCommit = true
-       return config
-}
-
-func (c *ConsensusConfig) WalFile() string {
-       if c.walFile != "" {
-               return c.walFile
-       }
-       return rootify(c.WalPath, c.RootDir)
-}
-
-func (c *ConsensusConfig) SetWalFile(walFile string) {
-       c.walFile = walFile
-}
-
-//-----------------------------------------------------------------------------
 // Utils
 
 // helper function to make config creation independent of root dir
index 6379960..673b4a4 100644 (file)
@@ -12,17 +12,13 @@ func TestDefaultConfig(t *testing.T) {
        // set up some defaults
        cfg := DefaultConfig()
        assert.NotNil(cfg.P2P)
-       assert.NotNil(cfg.Mempool)
-       assert.NotNil(cfg.Consensus)
 
        // check the root dir stuff...
        cfg.SetRoot("/foo")
        cfg.Genesis = "bar"
        cfg.DBPath = "/opt/data"
-       cfg.Mempool.WalPath = "wal/mem/"
 
        assert.Equal("/foo/bar", cfg.GenesisFile())
        assert.Equal("/opt/data", cfg.DBDir())
-       assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
 
 }
index 4a5a353..50262aa 100644 (file)
@@ -28,11 +28,11 @@ func EnsureRoot(rootDir string) {
 var defaultConfigTmpl = `# This is a TOML config file.
 # For more information, see https://github.com/toml-lang/toml
 
-proxy_app = "tcp://127.0.0.1:46658"
 moniker = "__MONIKER__"
 fast_sync = true
 db_backend = "leveldb"
 log_level = "state:info,*:info"
+api_addr = "0.0.0.0:1999"
 
 [rpc]
 laddr = "tcp://0.0.0.0:46657"
@@ -49,16 +49,14 @@ func defaultConfig(moniker string) string {
 /****** these are for test settings ***********/
 
 func ResetTestRoot(testName string) *Config {
-       rootDir := os.ExpandEnv("$HOME/.tendermint_test")
+       rootDir := os.ExpandEnv("$HOME/.test")
        rootDir = filepath.Join(rootDir, testName)
-       // Remove ~/.tendermint_test_bak
        if cmn.FileExists(rootDir + "_bak") {
                err := os.RemoveAll(rootDir + "_bak")
                if err != nil {
                        cmn.PanicSanity(err.Error())
                }
        }
-       // Move ~/.tendermint_test to ~/.tendermint_test_bak
        if cmn.FileExists(rootDir) {
                err := os.Rename(rootDir, rootDir+"_bak")
                if err != nil {
@@ -91,11 +89,11 @@ func ResetTestRoot(testName string) *Config {
 var testConfigTmpl = `# This is a TOML config file.
 # For more information, see https://github.com/toml-lang/toml
 
-proxy_app = "dummy"
 moniker = "__MONIKER__"
 fast_sync = false
 db_backend = "memdb"
 log_level = "info"
+api_addr = "0.0.0.0:1999"
 
 [rpc]
 laddr = "tcp://0.0.0.0:36657"
index dd77f25..afd9e50 100644 (file)
@@ -1,46 +1,48 @@
 package node
 
 import (
-    "context"
+       "context"
        "crypto/tls"
+       "net"
        "net/http"
+       "os"
        "strings"
-    "net"
        "sync"
-       "os"
        "time"
 
-       crypto "github.com/tendermint/go-crypto"
-       wire "github.com/tendermint/go-wire"
+       bc "github.com/bytom/blockchain"
        cfg "github.com/bytom/config"
        p2p "github.com/bytom/p2p"
+       "github.com/bytom/protocol/bc/legacy"
+       rpccore "github.com/bytom/rpc/core"
+       grpccore "github.com/bytom/rpc/grpc"
        "github.com/bytom/types"
        "github.com/bytom/version"
+       crypto "github.com/tendermint/go-crypto"
+       wire "github.com/tendermint/go-wire"
        cmn "github.com/tendermint/tmlibs/common"
+       dbm "github.com/tendermint/tmlibs/db"
        "github.com/tendermint/tmlibs/log"
-    bc "github.com/bytom/blockchain"
-    dbm "github.com/tendermint/tmlibs/db"
-    "github.com/bytom/protocol/bc/legacy"
-       rpccore "github.com/bytom/rpc/core"
-       grpccore "github.com/bytom/rpc/grpc"
        //rpc "github.com/blockchain/rpc/lib"
-       rpcserver "github.com/bytom/rpc/lib/server"
-    "github.com/bytom/blockchain/account"
-    "github.com/bytom/protocol"
-    "github.com/bytom/blockchain/txdb"
+       "github.com/bytom/blockchain/account"
+       "github.com/bytom/blockchain/txdb"
        "github.com/bytom/net/http/reqid"
-//     "github.com/bytom/net/http/static"
-//     "github.com/bytom/generated/dashboard"
+       "github.com/bytom/protocol"
+       "github.com/bytom/blockchain/asset"
+       rpcserver "github.com/bytom/rpc/lib/server"
+       //      "github.com/bytom/net/http/static"
+       //      "github.com/bytom/generated/dashboard"
+
        "github.com/bytom/env"
-       "github.com/kr/secureheader"
-       bytomlog "github.com/bytom/log"
        "github.com/bytom/errors"
+       bytomlog "github.com/bytom/log"
+       "github.com/kr/secureheader"
 
        _ "net/http/pprof"
 )
 
 const (
-    httpReadTimeout  = 2 * time.Minute
+       httpReadTimeout  = 2 * time.Minute
        httpWriteTimeout = time.Hour
 )
 
@@ -48,8 +50,7 @@ type Node struct {
        cmn.BaseService
 
        // config
-       config        *cfg.Config
-       privValidator *types.PrivValidator // local node's validator key
+       config *cfg.Config
 
        // network
        privKey  crypto.PrivKeyEd25519 // local node's p2p key
@@ -57,18 +58,18 @@ type Node struct {
        addrBook *p2p.AddrBook         // known peers
 
        // services
-       evsw             types.EventSwitch           // pub/sub for services
-//    blockStore       *bc.MemStore
-    blockStore       *txdb.Store
-    bcReactor        *bc.BlockchainReactor
-    accounts         *account.Manager
-    rpcListeners     []net.Listener              // rpc servers
+       evsw types.EventSwitch // pub/sub for services
+       //    blockStore       *bc.MemStore
+       blockStore   *txdb.Store
+       bcReactor    *bc.BlockchainReactor
+       accounts     *account.Manager
+       assets       *asset.Registry
+       rpcListeners []net.Listener // rpc servers
 }
 
 var (
-    // config vars
+       // config vars
        rootCAs       = env.String("ROOT_CA_CERTS", "") // file path
-       listenAddr    = env.String("LISTEN", ":1999")
        splunkAddr    = os.Getenv("SPLUNKADDR")
        logFile       = os.Getenv("LOGFILE")
        logSize       = env.Int("LOGSIZE", 5e6) // 5MB
@@ -84,19 +85,16 @@ var (
        buildTag    = "?"
        buildCommit = "?"
        buildDate   = "?"
-       race []interface{} // initialized in race.go
+       race        []interface{} // initialized in race.go
 )
 
-
 func NewNodeDefault(config *cfg.Config, logger log.Logger) *Node {
-       // Get PrivValidator
-       privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
-       return NewNode(config, privValidator, logger)
+       return NewNode(config, logger)
 }
 
 func RedirectHandler(next http.Handler) http.Handler {
-    return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-           if req.URL.Path == "/" {
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               if req.URL.Path == "/" {
                        http.Redirect(w, req, "/dashboard/", http.StatusFound)
                        return
                }
@@ -104,14 +102,13 @@ func RedirectHandler(next http.Handler) http.Handler {
        })
 }
 
-
 type waitHandler struct {
-    h  http.Handler
+       h  http.Handler
        wg sync.WaitGroup
 }
 
 func (wh *waitHandler) Set(h http.Handler) {
-    wh.h = h
+       wh.h = h
        wh.wg.Done()
 }
 
@@ -120,7 +117,7 @@ func (wh *waitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
        wh.h.ServeHTTP(w, req)
 }
 
-func rpcInit(h *bc.BlockchainReactor) {
+func rpcInit(h *bc.BlockchainReactor, config *cfg.Config) {
        // The waitHandler accepts incoming requests, but blocks until its underlying
        // handler is set, when the second phase is complete.
        var coreHandler waitHandler
@@ -148,6 +145,7 @@ func rpcInit(h *bc.BlockchainReactor) {
                // https://github.com/golang/go/issues/17071
                TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
        }
+       listenAddr := env.String("LISTEN", config.ApiAddress)
        listener, _ := net.Listen("tcp", *listenAddr)
 
        // The `Serve` call has to happen in its own goroutine because
@@ -160,17 +158,23 @@ func rpcInit(h *bc.BlockchainReactor) {
        coreHandler.Set(h)
 }
 
-func NewNode(config *cfg.Config, privValidator *types.PrivValidator, logger log.Logger) *Node {
+func setupGenesisBlock(config *cfg.Config) (*legacy.Block, error) {
+       var timestamp time.Time = config.Time
+       return protocol.NewInitialBlock(timestamp)
+}
+
+func NewNode(config *cfg.Config, logger log.Logger) *Node {
        // Get store
-    tx_db := dbm.NewDB("txdb", config.DBBackend, config.DBDir())
-    store := txdb.NewStore(tx_db)
-    genesisBlock := legacy.Block {
-        BlockHeader: legacy.BlockHeader {
-            Version: 1,
-            Height: 0,
-        },
-    }
-    store.SaveBlock(&genesisBlock)
+       tx_db := dbm.NewDB("txdb", config.DBBackend, config.DBDir())
+       store := txdb.NewStore(tx_db)
+       /*genesisBlock := legacy.Block {
+             BlockHeader: legacy.BlockHeader {
+                 Version: 1,
+                 Height: 0,
+             },
+         }
+         store.SaveBlock(&genesisBlock)
+       */
 
        // Generate node PrivKey
        privKey := crypto.GenPrivKeyEd25519()
@@ -183,36 +187,37 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, logger log.
                cmn.Exit(cmn.Fmt("Failed to start switch: %v", err))
        }
 
-
        p2pLogger := logger.With("module", "p2p")
 
        sw := p2p.NewSwitch(config.P2P)
        sw.SetLogger(p2pLogger)
 
-    fastSync := config.FastSync
-    genesisblock, err := protocol.NewInitialBlock()
-    if err != nil {
-      cmn.Exit(cmn.Fmt("initialize genesisblock failed: %v", err))
-    }
-
-    chain, err := protocol.NewChain(context.Background(), genesisblock.Hash(), store, nil)
-   /* if err != nil {
-      cmn.Exit(cmn.Fmt("protocol new chain failed: %v", err))
-    }
-    err = chain.CommitAppliedBlock(context.Background(), block, state.Empty())
-    if err != nil {
-      cmn.Exit(cmn.Fmt("commit block failed: %v", err))
-    }
-    chain.MaxIssuanceWindow = bc.MillisDuration(c.MaxIssuanceWindowMs)
-    */
-
-    accounts_db := dbm.NewDB("account", config.DBBackend, config.DBDir())
-    accounts := account.NewManager(accounts_db, chain)
-    bcReactor := bc.NewBlockchainReactor(store, chain, accounts, fastSync)
-    bcReactor.SetLogger(logger.With("module", "blockchain"))
-    sw.AddReactor("BLOCKCHAIN", bcReactor)
-
-       rpcInit(bcReactor)
+       fastSync := config.FastSync
+       genesisBlock, err := setupGenesisBlock(config)
+       if err != nil {
+               cmn.Exit(cmn.Fmt("initialize genesisblock failed: %v", err))
+       }
+
+       chain, err := protocol.NewChain(context.Background(), genesisBlock.Hash(), store, nil)
+       /* if err != nil {
+            cmn.Exit(cmn.Fmt("protocol new chain failed: %v", err))
+          }
+          err = chain.CommitAppliedBlock(context.Background(), block, state.Empty())
+          if err != nil {
+            cmn.Exit(cmn.Fmt("commit block failed: %v", err))
+          }
+          chain.MaxIssuanceWindow = bc.MillisDuration(c.MaxIssuanceWindowMs)
+       */
+
+       accounts_db := dbm.NewDB("account", config.DBBackend, config.DBDir())
+       accounts := account.NewManager(accounts_db, chain)
+       assets_db := dbm.NewDB("asset", config.DBBackend, config.DBDir())
+       assets := asset.NewRegistry(assets_db, chain)
+       bcReactor := bc.NewBlockchainReactor(store, chain, accounts, assets, fastSync)
+       bcReactor.SetLogger(logger.With("module", "blockchain"))
+       sw.AddReactor("BLOCKCHAIN", bcReactor)
+
+       rpcInit(bcReactor, config)
        // Optionally, start the pex reactor
        var addrBook *p2p.AddrBook
        if config.P2P.PexReactor {
@@ -237,17 +242,17 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, logger log.
        }
 
        node := &Node{
-               config:        config,
-               privValidator: privValidator,
+               config: config,
 
                privKey:  privKey,
                sw:       sw,
                addrBook: addrBook,
 
-               evsw:      eventSwitch,
-        bcReactor: bcReactor,
-        blockStore: store,
-        accounts: accounts,
+               evsw:       eventSwitch,
+               bcReactor:  bcReactor,
+               blockStore: store,
+               accounts:   accounts,
+               assets:     assets,
        }
        node.BaseService = *cmn.NewBaseService(logger, "Node", node)
        return node
@@ -331,7 +336,6 @@ func (n *Node) ConfigureRPC() {
        //rpccore.SetConsensusState(n.consensusState)
        //rpccore.SetMempool(n.mempoolReactor.Mempool)
        rpccore.SetSwitch(n.sw)
-       //rpccore.SetPubKey(n.privValidator.PubKey)
        //rpccore.SetGenesisDoc(n.genesisDoc)
        rpccore.SetAddrBook(n.addrBook)
        //rpccore.SetProxyAppQuery(n.proxyApp.Query())
@@ -372,7 +376,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
                }
                listeners = append(listeners, listener)
        }
-    return listeners, nil
+       return listeners, nil
 }
 
 func (n *Node) Switch() *p2p.Switch {
@@ -383,11 +387,6 @@ func (n *Node) EventSwitch() types.EventSwitch {
        return n.evsw
 }
 
-// XXX: for convenience
-func (n *Node) PrivValidator() *types.PrivValidator {
-       return n.privValidator
-}
-
 func (n *Node) makeNodeInfo() *p2p.NodeInfo {
        nodeInfo := &p2p.NodeInfo{
                PubKey:  n.privKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
index 3119892..1405bfa 100644 (file)
@@ -22,7 +22,6 @@ It has these top-level messages:
        Nonce
        Output
        Retirement
-       TimeRange
        Issuance
        Spend
 */
@@ -273,15 +272,15 @@ func (m *ValueDestination) GetPosition() uint64 {
 }
 
 type BlockHeader struct {
-       Version              uint64   `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
-       Height               uint64   `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
-       PreviousBlockId      *Hash    `protobuf:"bytes,3,opt,name=previous_block_id,json=previousBlockId" json:"previous_block_id,omitempty"`
-       TimestampMs          uint64   `protobuf:"varint,4,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
-       TransactionsRoot     *Hash    `protobuf:"bytes,5,opt,name=transactions_root,json=transactionsRoot" json:"transactions_root,omitempty"`
-       AssetsRoot           *Hash    `protobuf:"bytes,6,opt,name=assets_root,json=assetsRoot" json:"assets_root,omitempty"`
-       NextConsensusProgram []byte   `protobuf:"bytes,7,opt,name=next_consensus_program,json=nextConsensusProgram,proto3" json:"next_consensus_program,omitempty"`
-       ExtHash              *Hash    `protobuf:"bytes,8,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
-       WitnessArguments     [][]byte `protobuf:"bytes,9,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+       Version          uint64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+       SerializedSize   uint64 `protobuf:"varint,2,opt,name=serialized_size,json=serializedSize" json:"serialized_size,omitempty"`
+       Height           uint64 `protobuf:"varint,3,opt,name=height" json:"height,omitempty"`
+       PreviousBlockId  *Hash  `protobuf:"bytes,4,opt,name=previous_block_id,json=previousBlockId" json:"previous_block_id,omitempty"`
+       TimestampMs      uint64 `protobuf:"varint,5,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+       TransactionsRoot *Hash  `protobuf:"bytes,6,opt,name=transactions_root,json=transactionsRoot" json:"transactions_root,omitempty"`
+       AssetsRoot       *Hash  `protobuf:"bytes,7,opt,name=assets_root,json=assetsRoot" json:"assets_root,omitempty"`
+       Nonce            uint64 `protobuf:"varint,8,opt,name=nonce" json:"nonce,omitempty"`
+       Bits             uint64 `protobuf:"varint,9,opt,name=bits" json:"bits,omitempty"`
 }
 
 func (m *BlockHeader) Reset()                    { *m = BlockHeader{} }
@@ -331,34 +330,14 @@ func (m *BlockHeader) GetAssetsRoot() *Hash {
        return nil
 }
 
-func (m *BlockHeader) GetNextConsensusProgram() []byte {
-       if m != nil {
-               return m.NextConsensusProgram
-       }
-       return nil
-}
-
-func (m *BlockHeader) GetExtHash() *Hash {
-       if m != nil {
-               return m.ExtHash
-       }
-       return nil
-}
-
-func (m *BlockHeader) GetWitnessArguments() [][]byte {
-       if m != nil {
-               return m.WitnessArguments
-       }
-       return nil
-}
-
 type TxHeader struct {
-       Version   uint64  `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
-       ResultIds []*Hash `protobuf:"bytes,2,rep,name=result_ids,json=resultIds" json:"result_ids,omitempty"`
-       Data      *Hash   `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
-       MinTimeMs uint64  `protobuf:"varint,4,opt,name=min_time_ms,json=minTimeMs" json:"min_time_ms,omitempty"`
-       MaxTimeMs uint64  `protobuf:"varint,5,opt,name=max_time_ms,json=maxTimeMs" json:"max_time_ms,omitempty"`
-       ExtHash   *Hash   `protobuf:"bytes,6,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       Version        uint64  `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+       SerializedSize uint64  `protobuf:"varint,2,opt,name=serialized_size,json=serializedSize" json:"serialized_size,omitempty"`
+       ResultIds      []*Hash `protobuf:"bytes,3,rep,name=result_ids,json=resultIds" json:"result_ids,omitempty"`
+       Data           *Hash   `protobuf:"bytes,4,opt,name=data" json:"data,omitempty"`
+       MinTimeMs      uint64  `protobuf:"varint,5,opt,name=min_time_ms,json=minTimeMs" json:"min_time_ms,omitempty"`
+       MaxTimeMs      uint64  `protobuf:"varint,6,opt,name=max_time_ms,json=maxTimeMs" json:"max_time_ms,omitempty"`
+       ExtHash        *Hash   `protobuf:"bytes,7,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
 }
 
 func (m *TxHeader) Reset()                    { *m = TxHeader{} }
@@ -458,10 +437,9 @@ func (m *Mux) GetWitnessArguments() [][]byte {
 
 type Nonce struct {
        Program           *Program `protobuf:"bytes,1,opt,name=program" json:"program,omitempty"`
-       TimeRangeId       *Hash    `protobuf:"bytes,2,opt,name=time_range_id,json=timeRangeId" json:"time_range_id,omitempty"`
-       ExtHash           *Hash    `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
-       WitnessArguments  [][]byte `protobuf:"bytes,4,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
-       WitnessAnchoredId *Hash    `protobuf:"bytes,5,opt,name=witness_anchored_id,json=witnessAnchoredId" json:"witness_anchored_id,omitempty"`
+       ExtHash           *Hash    `protobuf:"bytes,2,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       WitnessArguments  [][]byte `protobuf:"bytes,3,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+       WitnessAnchoredId *Hash    `protobuf:"bytes,4,opt,name=witness_anchored_id,json=witnessAnchoredId" json:"witness_anchored_id,omitempty"`
 }
 
 func (m *Nonce) Reset()                    { *m = Nonce{} }
@@ -476,13 +454,6 @@ func (m *Nonce) GetProgram() *Program {
        return nil
 }
 
-func (m *Nonce) GetTimeRangeId() *Hash {
-       if m != nil {
-               return m.TimeRangeId
-       }
-       return nil
-}
-
 func (m *Nonce) GetExtHash() *Hash {
        if m != nil {
                return m.ExtHash
@@ -504,6 +475,22 @@ func (m *Nonce) GetWitnessAnchoredId() *Hash {
        return nil
 }
 
+type Coinbase struct {
+       WitnessDestination *ValueDestination `protobuf:"bytes,1,opt,name=witness_destination,json=witnessDestination" json:"witness_destination,omitempty"`
+}
+
+func (m *Coinbase) Reset()                    { *m = Coinbase{} }
+func (m *Coinbase) String() string            { return proto.CompactTextString(m) }
+func (*Coinbase) ProtoMessage()               {}
+func (*Coinbase) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *Coinbase) GetWitnessDestination() *ValueDestination {
+       if m != nil {
+               return m.WitnessDestination
+       }
+       return nil
+}
+
 type Output struct {
        Source         *ValueSource `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"`
        ControlProgram *Program     `protobuf:"bytes,2,opt,name=control_program,json=controlProgram" json:"control_program,omitempty"`
@@ -592,38 +579,6 @@ func (m *Retirement) GetOrdinal() uint64 {
        return 0
 }
 
-type TimeRange struct {
-       MinTimeMs uint64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs" json:"min_time_ms,omitempty"`
-       MaxTimeMs uint64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs" json:"max_time_ms,omitempty"`
-       ExtHash   *Hash  `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
-}
-
-func (m *TimeRange) Reset()                    { *m = TimeRange{} }
-func (m *TimeRange) String() string            { return proto.CompactTextString(m) }
-func (*TimeRange) ProtoMessage()               {}
-func (*TimeRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-func (m *TimeRange) GetMinTimeMs() uint64 {
-       if m != nil {
-               return m.MinTimeMs
-       }
-       return 0
-}
-
-func (m *TimeRange) GetMaxTimeMs() uint64 {
-       if m != nil {
-               return m.MaxTimeMs
-       }
-       return 0
-}
-
-func (m *TimeRange) GetExtHash() *Hash {
-       if m != nil {
-               return m.ExtHash
-       }
-       return nil
-}
-
 type Issuance struct {
        AnchorId               *Hash             `protobuf:"bytes,1,opt,name=anchor_id,json=anchorId" json:"anchor_id,omitempty"`
        Value                  *AssetAmount      `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
@@ -782,7 +737,7 @@ func init() {
        proto.RegisterType((*Nonce)(nil), "bc.Nonce")
        proto.RegisterType((*Output)(nil), "bc.Output")
        proto.RegisterType((*Retirement)(nil), "bc.Retirement")
-       proto.RegisterType((*TimeRange)(nil), "bc.TimeRange")
+       proto.RegisterType((*Coinbase)(nil), "bc.Coinbase")
        proto.RegisterType((*Issuance)(nil), "bc.Issuance")
        proto.RegisterType((*Spend)(nil), "bc.Spend")
 }
index 0c61588..162037b 100644 (file)
@@ -51,24 +51,24 @@ message ValueDestination {
 
 message BlockHeader {
   uint64 version                   = 1;
-  uint64 height                    = 2;
-  Hash   previous_block_id         = 3;
-  uint64 timestamp_ms              = 4;
-  Hash   transactions_root         = 5;
-  Hash   assets_root               = 6;
-  bytes  next_consensus_program    = 7;
-  Hash   ext_hash                  = 8;
-
-  repeated bytes witness_arguments = 9;
+  uint64 serialized_size           = 2;
+  uint64 height                    = 3;
+  Hash   previous_block_id         = 4;
+  uint64 timestamp_ms              = 5;
+  Hash   transactions_root         = 6;
+  Hash   assets_root               = 7;
+  uint64 nonce                     = 8;
+  uint64 bits                      = 9;
 }
 
 message TxHeader {
-  uint64        version     = 1;
-  repeated Hash result_ids  = 2;
-  Hash          data        = 3;
-  uint64        min_time_ms = 4;
-  uint64        max_time_ms = 5;
-  Hash          ext_hash    = 6;
+  uint64        version         = 1;
+  uint64        serialized_size = 2;
+  repeated Hash result_ids      = 3;
+  Hash          data            = 4;
+  uint64        min_time_ms.    = 5;
+  uint64        max_time_ms.    = 6;
+  Hash          ext_hash        = 7;
 }
 
 message Mux {
@@ -81,10 +81,13 @@ message Mux {
 
 message Nonce {
   Program        program             = 1;
-  Hash           time_range_id       = 2;
-  Hash           ext_hash            = 3;
-  repeated bytes witness_arguments   = 4;
-  Hash           witness_anchored_id = 5;
+  Hash           ext_hash            = 2;
+  repeated bytes witness_arguments   = 3;
+  Hash           witness_anchored_id = 4;
+}
+
+message Coinbase {
+  ValueDestination witness_destination = 1;
 }
 
 message Output {
@@ -102,12 +105,6 @@ message Retirement {
   uint64      ordinal  = 4;
 }
 
-message TimeRange {
-  uint64    min_time_ms = 1;
-  uint64    max_time_ms = 2;
-  Hash      ext_hash    = 3;
-}
-
 message Issuance {
   Hash             anchor_id                = 1;
   AssetAmount      value                    = 2;
index 4e76347..1e7cabe 100644 (file)
@@ -2,6 +2,11 @@ package bc
 
 import "io"
 
+const (
+       subsidyReductionInterval = uint64(560640)
+       baseSubsidy              = uint64(624000000000)
+)
+
 // BlockHeader contains the header information for a blockchain
 // block. It satisfies the Entry interface.
 
@@ -13,20 +18,23 @@ func (bh *BlockHeader) writeForHash(w io.Writer) {
        mustWriteForHash(w, bh.TimestampMs)
        mustWriteForHash(w, bh.TransactionsRoot)
        mustWriteForHash(w, bh.AssetsRoot)
-       mustWriteForHash(w, bh.NextConsensusProgram)
-       mustWriteForHash(w, bh.ExtHash)
+}
+
+func (bh *BlockHeader) BlockSubsidy() uint64 {
+       return baseSubsidy >> uint(bh.Height/subsidyReductionInterval)
 }
 
 // NewBlockHeader creates a new BlockHeader and populates
 // its body.
-func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestampMS uint64, transactionsRoot, assetsRoot *Hash, nextConsensusProgram []byte) *BlockHeader {
+func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestampMS uint64, transactionsRoot, assetsRoot *Hash, nonce, bits uint64) *BlockHeader {
        return &BlockHeader{
-               Version:              version,
-               Height:               height,
-               PreviousBlockId:      previousBlockID,
-               TimestampMs:          timestampMS,
-               TransactionsRoot:     transactionsRoot,
-               AssetsRoot:           assetsRoot,
-               NextConsensusProgram: nextConsensusProgram,
+               Version:          version,
+               Height:           height,
+               PreviousBlockId:  previousBlockID,
+               TimestampMs:      timestampMS,
+               TransactionsRoot: transactionsRoot,
+               AssetsRoot:       assetsRoot,
+               Nonce:            nonce,
+               Bits:             bits,
        }
 }
diff --git a/protocol/bc/blockheader_test.go b/protocol/bc/blockheader_test.go
new file mode 100644 (file)
index 0000000..ebbba71
--- /dev/null
@@ -0,0 +1,33 @@
+package bc
+
+import (
+       "testing"
+)
+
+func TestSubsidy(t *testing.T) {
+       cases := []struct {
+               bh      *BlockHeader
+               subsidy uint64
+       }{
+               {
+                       bh: &BlockHeader{
+                               Height: 1,
+                       },
+                       subsidy: 624000000000,
+               },
+               {
+                       bh: &BlockHeader{
+                               Height: 560640,
+                       },
+                       subsidy: 312000000000,
+               },
+       }
+
+       for _, c := range cases {
+               subsidy := c.bh.BlockSubsidy()
+
+               if subsidy != c.subsidy {
+                       t.Errorf("got subsidy %s, want %s", subsidy, c.subsidy)
+               }
+       }
+}
diff --git a/protocol/bc/coinbase.go b/protocol/bc/coinbase.go
new file mode 100644 (file)
index 0000000..dd0cc37
--- /dev/null
@@ -0,0 +1,21 @@
+package bc
+
+import "io"
+
+func (Coinbase) typ() string { return "coinbase1" }
+func (c *Coinbase) writeForHash(w io.Writer) {
+       //mustWriteForHash(w, c.WitnessDestination)
+}
+
+func (c *Coinbase) SetDestination(id *Hash, val *AssetAmount, pos uint64) {
+       c.WitnessDestination = &ValueDestination{
+               Ref:      id,
+               Value:    val,
+               Position: pos,
+       }
+}
+
+// NewCoinbase creates a new Coinbase.
+func NewCoinbase() *Coinbase {
+       return &Coinbase{}
+}
index 51b41a0..e97f094 100644 (file)
@@ -11,9 +11,9 @@ func BenchmarkEntryID(b *testing.B) {
 
        entries := []Entry{
                NewIssuance(nil, &AssetAmount{}, &Hash{}, 0),
-               NewTxHeader(1, nil, &Hash{}, uint64(time.Now().Unix()), uint64(time.Now().Unix())),
+               NewTxHeader(1, 1, nil, &Hash{}, uint64(time.Now().Unix()), uint64(time.Now().Unix())),
                m,
-               NewNonce(&Program{Code: []byte{1}, VmVersion: 1}, nil),
+               NewNonce(&Program{Code: []byte{1}, VmVersion: 1}),
                NewOutput(&ValueSource{}, &Program{Code: []byte{1}, VmVersion: 1}, &Hash{}, 0),
                NewRetirement(&ValueSource{}, &Hash{}, 1),
                NewSpend(&Hash{}, &Hash{}, 0),
index c2387f0..7ad0bce 100644 (file)
@@ -17,9 +17,6 @@ type BlockCommitment struct {
        // the set of unspent outputs with asset version 1 after applying
        // the block.
        AssetsMerkleRoot bc.Hash
-
-       // ConsensusProgram is the predicate for validating the next block.
-       ConsensusProgram []byte
 }
 
 func (bc *BlockCommitment) readFrom(r *blockchain.Reader) error {
@@ -31,7 +28,6 @@ func (bc *BlockCommitment) readFrom(r *blockchain.Reader) error {
        if err != nil {
                return err
        }
-       bc.ConsensusProgram, err = blockchain.ReadVarstr31(r)
        return err
 }
 
@@ -44,6 +40,5 @@ func (bc *BlockCommitment) writeTo(w io.Writer) error {
        if err != nil {
                return err
        }
-       _, err = blockchain.WriteVarstr31(w, bc.ConsensusProgram)
        return err
 }
index 9951d6b..f2efe0a 100644 (file)
@@ -32,10 +32,9 @@ type BlockHeader struct {
        TimestampMS uint64
 
        BlockCommitment
-       CommitmentSuffix []byte
 
-       BlockWitness
-       WitnessSuffix []byte
+       Nonce uint64
+       Bits  uint64
 }
 
 // Time returns the time represented by the Timestamp in bh.
@@ -128,19 +127,19 @@ func (bh *BlockHeader) readFrom(r *blockchain.Reader) (uint8, error) {
                return 0, err
        }
 
-       bh.CommitmentSuffix, err = blockchain.ReadExtensibleString(r, bh.BlockCommitment.readFrom)
+       _, err = blockchain.ReadExtensibleString(r, bh.BlockCommitment.readFrom)
        if err != nil {
                return 0, err
        }
 
-       if serflags[0]&SerBlockWitness == SerBlockWitness {
-               bh.WitnessSuffix, err = blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) (err error) {
-                       bh.Witness, err = blockchain.ReadVarstrList(r)
-                       return err
-               })
-               if err != nil {
-                       return 0, err
-               }
+       bh.Nonce, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return 0, err
+       }
+
+       bh.Bits, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return 0, err
        }
 
        return serflags[0], nil
@@ -174,18 +173,17 @@ func (bh *BlockHeader) writeTo(w io.Writer, serflags uint8) error {
        if err != nil {
                return err
        }
-
-       _, err = blockchain.WriteExtensibleString(w, bh.CommitmentSuffix, bh.BlockCommitment.writeTo)
+       _, err = blockchain.WriteExtensibleString(w, nil, bh.BlockCommitment.writeTo)
        if err != nil {
                return err
        }
-
-       if serflags&SerBlockWitness == SerBlockWitness {
-               _, err = blockchain.WriteExtensibleString(w, bh.WitnessSuffix, bh.BlockWitness.writeTo)
-               if err != nil {
-                       return err
-               }
+       _, err = blockchain.WriteVarint63(w, bh.Nonce)
+       if err != nil {
+               return err
+       }
+       _, err = blockchain.WriteVarint63(w, bh.Bits)
+       if err != nil {
+               return err
        }
-
        return nil
 }
index 23fe780..9efe856 100644 (file)
@@ -22,7 +22,8 @@ func TestMarshalBlock(t *testing.T) {
 
                Transactions: []*Tx{
                        NewTx(TxData{
-                               Version: 1,
+                               Version:        1,
+                               SerializedSize: uint64(46),
                                Outputs: []*TxOutput{
                                        NewTxOutput(bc.AssetID{}, 1, nil, nil),
                                },
@@ -41,12 +42,12 @@ func TestMarshalBlock(t *testing.T) {
                "01" + // block height
                "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
                "00" + // timestamp
-               "41" + // commitment extensible field length
+               "40" + // commitment extensible field length
                "0000000000000000000000000000000000000000000000000000000000000000" + // tx merkle root
                "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
-               "00" + // consensus program
-               "01" + // witness extensible string length
-               "00" + // witness number of witness args
+               "00" + // nonce
+               "00" + // bits
+
                "01" + // num transactions
                "07" + // tx 0, serialization flags
                "01" + // tx 0, tx version
@@ -101,12 +102,11 @@ func TestEmptyBlock(t *testing.T) {
                "01" + // block height
                "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
                "00" + // timestamp
-               "41" + // commitment extensible field length
+               "40" + // commitment extensible field length
                "0000000000000000000000000000000000000000000000000000000000000000" + // transactions merkle root
                "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
-               "00" + // consensus program
-               "01" + // witness extensible string length
-               "00" + // witness number of witness args
+               "00" + // nonce
+               "00" + // bits
                "00") // num transactions
        want, _ := hex.DecodeString(wantHex)
        if !bytes.Equal(got, want) {
@@ -119,18 +119,17 @@ func TestEmptyBlock(t *testing.T) {
                "01" + // block height
                "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
                "00" + // timestamp
-               "41" + // commitment extensible field length
+               "40" + // commitment extensible field length
                "0000000000000000000000000000000000000000000000000000000000000000" + // transactions merkle root
                "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
-               "00" + // consensus program
-               "01" + // witness extensible string length
-               "00") // witness number of witness args
+               "00" + // nonce
+               "00") // bits
        want, _ = hex.DecodeString(wantHex)
        if !bytes.Equal(got, want) {
                t.Errorf("empty block header bytes = %x want %x", got, want)
        }
 
-       wantHash := mustDecodeHash("6a73cbca99e33c8403d589664623c74df34dd6d7328ab6e7f27dd3e60d959850")
+       wantHash := mustDecodeHash("f2e8d5be16096e275a51866eb5207b385c9aa02ec570a1540f445efcba05e04d")
        if h := block.Hash(); h != wantHash {
                t.Errorf("got block hash %x, want %x", h.Bytes(), wantHash.Bytes())
        }
@@ -156,12 +155,11 @@ func TestSmallBlock(t *testing.T) {
                "01" + // block height
                "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
                "00" + // timestamp
-               "41" + // commitment extensible field length
+               "40" + // commitment extensible field length
                "0000000000000000000000000000000000000000000000000000000000000000" + // transactions merkle root
                "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
-               "00" + // consensus program
-               "01" + // witness extensible string length
-               "00" + // witness num witness args
+               "00" + // nonce
+               "00" + // bits
                "01" + // num transactions
                "070102000000000000") // transaction
        want, _ := hex.DecodeString(wantHex)
diff --git a/protocol/bc/legacy/block_witness.go b/protocol/bc/legacy/block_witness.go
deleted file mode 100644 (file)
index 222b848..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-package legacy
-
-import (
-       "io"
-
-       "github.com/bytom/encoding/blockchain"
-)
-
-type BlockWitness struct {
-       // Witness is a vector of arguments to the previous block's
-       // ConsensusProgram for validating this block.
-       Witness [][]byte
-}
-
-func (bw *BlockWitness) writeTo(w io.Writer) error {
-       _, err := blockchain.WriteVarstrList(w, bw.Witness)
-       return err
-}
index 1019dce..1d5087d 100644 (file)
@@ -121,8 +121,6 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash
                        )
 
                        if len(oldIss.Nonce) > 0 {
-                               tr := bc.NewTimeRange(tx.MinTime, tx.MaxTime)
-                               trID := addEntry(tr)
                                assetID := oldIss.AssetID()
 
                                builder := vmutil.NewBuilder()
@@ -130,7 +128,7 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash
                                builder.AddOp(vm.OP_ASSET).AddData(assetID.Bytes()).AddOp(vm.OP_EQUAL)
                                prog, _ := builder.Build() // error is impossible
 
-                               nonce := bc.NewNonce(&bc.Program{VmVersion: 1, Code: prog}, &trID)
+                               nonce := bc.NewNonce(&bc.Program{VmVersion: 1, Code: prog})
                                anchorID = addEntry(nonce)
                                setAnchored = nonce.SetAnchored
                        } else if firstSpend != nil {
@@ -163,6 +161,17 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash
                }
        }
 
+       if tx.IsCoinbase() {
+               cb := bc.NewCoinbase()
+               cbId := addEntry(cb)
+
+               out := tx.Outputs[0]
+               muxSources = []*bc.ValueSource{{
+                       Ref:   &cbId,
+                       Value: &out.AssetAmount,
+               }}
+       }
+
        mux := bc.NewMux(muxSources, &bc.Program{VmVersion: 1, Code: []byte{byte(vm.OP_TRUE)}})
        muxID := addEntry(mux)
 
@@ -174,6 +183,12 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash
                iss.SetDestination(&muxID, iss.Value, iss.Ordinal)
        }
 
+       if tx.IsCoinbase() {
+               muxSource := mux.Sources[0]
+               cb := entryMap[*muxSource.Ref].(*bc.Coinbase)
+               cb.SetDestination(&muxID, muxSource.Value, 0)
+       }
+
        var resultIDs []*bc.Hash
 
        for i, out := range tx.Outputs {
@@ -210,15 +225,14 @@ func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash
        }
 
        refdatahash := hashData(tx.ReferenceData)
-       h := bc.NewTxHeader(tx.Version, resultIDs, &refdatahash, tx.MinTime, tx.MaxTime)
+       h := bc.NewTxHeader(tx.Version, tx.SerializedSize, resultIDs, &refdatahash, tx.MinTime, tx.MaxTime)
        headerID = addEntry(h)
 
        return headerID, h, entryMap
 }
 
 func mapBlockHeader(old *BlockHeader) (bhID bc.Hash, bh *bc.BlockHeader) {
-       bh = bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.TimestampMS, &old.TransactionsMerkleRoot, &old.AssetsMerkleRoot, old.ConsensusProgram)
-       bh.WitnessArguments = old.Witness
+       bh = bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.TimestampMS, &old.TransactionsMerkleRoot, &old.AssetsMerkleRoot, old.Nonce, old.Bits)
        bhID = bc.EntryID(bh)
        return
 }
@@ -231,6 +245,7 @@ func MapBlock(old *Block) *bc.Block {
        b.ID, b.BlockHeader = mapBlockHeader(&old.BlockHeader)
        for _, oldTx := range old.Transactions {
                b.Transactions = append(b.Transactions, oldTx.Tx)
+               b.BlockHeader.SerializedSize += oldTx.TxData.SerializedSize
        }
        return b
 }
index 9ea0d5a..bc82270 100644 (file)
@@ -7,6 +7,7 @@ import (
        "github.com/davecgh/go-spew/spew"
 
        "github.com/bytom/protocol/bc"
+       "github.com/bytom/protocol/validation"
 )
 
 func TestMapTx(t *testing.T) {
@@ -22,6 +23,9 @@ func TestMapTx(t *testing.T) {
        if header.Version != 1 {
                t.Errorf("header.Version is %d, expected 1", header.Version)
        }
+       if header.SerializedSize != oldTx.SerializedSize {
+               t.Errorf("header.SerializedSize is %d, expected %d", header.SerializedSize, oldTx.SerializedSize)
+       }
        if header.MinTimeMs != oldTx.MinTime {
                t.Errorf("header.MinTimeMs is %d, expected %d", header.MinTimeMs, oldTx.MinTime)
        }
@@ -59,3 +63,59 @@ func TestMapTx(t *testing.T) {
                }
        }
 }
+
+func TestMapCoinbaseTx(t *testing.T) {
+       oldTx := &TxData{
+               Version: 1,
+               Inputs:  []*TxInput{},
+               Outputs: []*TxOutput{
+                       NewTxOutput(*validation.BTMAssetID, 800000000000, []byte{1}, nil),
+               },
+       }
+       oldOut := oldTx.Outputs[0]
+
+       _, header, entryMap := mapTx(oldTx)
+       t.Log(spew.Sdump(entryMap))
+
+       outEntry, ok := entryMap[*header.ResultIds[0]]
+       if !ok {
+               t.Errorf("entryMap contains nothing for output")
+               return
+       }
+       newOut, ok := outEntry.(*bc.Output)
+       if !ok {
+               t.Errorf("header.ResultIds[0] has type %T, expected *Output", outEntry)
+               return
+       }
+       if *newOut.Source.Value != oldOut.AssetAmount {
+               t.Errorf("(*output).Source is %v, expected %v", newOut.Source.Value, oldOut.AssetAmount)
+               return
+       }
+
+       muxEntry, ok := entryMap[*newOut.Source.Ref]
+       if !ok {
+               t.Errorf("entryMap contains nothing for mux")
+               return
+       }
+       mux, ok := muxEntry.(*bc.Mux)
+       if !ok {
+               t.Errorf("muxEntry has type %T, expected *Mux", muxEntry)
+               return
+       }
+       if *mux.WitnessDestinations[0].Value != oldOut.AssetAmount {
+               t.Errorf("(*Mux).Source is %v, expected %v", newOut.Source.Value, oldOut.AssetAmount)
+               return
+       }
+
+       if coinbaseEntry, ok := entryMap[*mux.Sources[0].Ref]; ok {
+               if coinbase, ok := coinbaseEntry.(*bc.Coinbase); ok {
+                       if *coinbase.WitnessDestination.Value != oldOut.AssetAmount {
+                               t.Errorf("(*Coinbase).Source is %v, expected %v", newOut.Source.Value, oldOut.AssetAmount)
+                       }
+               } else {
+                       t.Errorf("inputEntry has type %T, expected *Coinbase", coinbaseEntry)
+               }
+       } else {
+               t.Errorf("entryMap contains nothing for input")
+       }
+}
index 1e98bce..f14a24b 100644 (file)
@@ -80,9 +80,10 @@ const (
 // Most users will want to use Tx instead;
 // it includes the hash.
 type TxData struct {
-       Version uint64
-       Inputs  []*TxInput
-       Outputs []*TxOutput
+       Version        uint64
+       SerializedSize uint64
+       Inputs         []*TxInput
+       Outputs        []*TxOutput
 
        // Common fields
        MinTime uint64
@@ -107,6 +108,11 @@ func (tx *TxData) HasIssuance() bool {
        return false
 }
 
+// IsCoinbase returns true if this transaction is coinbase transaction.
+func (tx *TxData) IsCoinbase() bool {
+       return len(tx.Inputs) == 0 && len(tx.Outputs) == 1
+}
+
 func (tx *TxData) UnmarshalText(p []byte) error {
        b := make([]byte, hex.DecodedLen(len(p)))
        _, err := hex.Decode(b, p)
@@ -140,6 +146,8 @@ func (tx *TxData) readFrom(r *blockchain.Reader) error {
                return errors.Wrap(err, "reading transaction version")
        }
 
+       tx.SerializedSize = uint64(r.Len())
+
        // Common fields
        tx.CommonFieldsSuffix, err = blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) error {
                tx.MinTime, err = blockchain.ReadVarint63(r)
index cfc4ee3..94b8c18 100644 (file)
@@ -46,12 +46,13 @@ func TestTransaction(t *testing.T) {
        }{
                {
                        tx: NewTx(TxData{
-                               Version:       1,
-                               Inputs:        nil,
-                               Outputs:       nil,
-                               MinTime:       0,
-                               MaxTime:       0,
-                               ReferenceData: nil,
+                               Version:        1,
+                               SerializedSize: uint64(7),
+                               Inputs:         nil,
+                               Outputs:        nil,
+                               MinTime:        0,
+                               MaxTime:        0,
+                               ReferenceData:  nil,
                        }),
                        hex: ("07" + // serflags
                                "01" + // transaction version
@@ -66,7 +67,8 @@ func TestTransaction(t *testing.T) {
                },
                {
                        tx: NewTx(TxData{
-                               Version: 1,
+                               Version:        1,
+                               SerializedSize: uint64(159),
                                Inputs: []*TxInput{
                                        NewIssuanceInput([]byte{10, 9, 8}, 1000000000000, []byte("input"), initialBlockHash, issuanceScript, [][]byte{[]byte{1, 2, 3}}, nil),
                                },
@@ -111,11 +113,12 @@ func TestTransaction(t *testing.T) {
                                "066f7574707574" + // output 0, reference data
                                "00" + // output 0, output witness
                                "0869737375616e6365"), // reference data
-                       hash: mustDecodeHash("cd4669d5363374f8661621273501c23e613fc98b0fab9d5d858f30e16ccd24ce"),
+                       hash: mustDecodeHash("515774561625cfe07629e49d4cf938d641aeb62af58e1b3ae2c582fee41dc628"),
                },
                {
                        tx: NewTx(TxData{
-                               Version: 1,
+                               Version:        1,
+                               SerializedSize: uint64(235),
                                Inputs: []*TxInput{
                                        NewSpendInput(nil, mustDecodeHash("dd385f6fe25d91d8c1bd0fa58951ad56b0c5229dcc01f61d9f9e8b9eb92d3292"), bc.AssetID{}, 1000000000000, 1, []byte{1}, bc.Hash{}, []byte("input")),
                                },
@@ -278,6 +281,43 @@ func TestHasIssuance(t *testing.T) {
        }
 }
 
+func TestIsCoinbase(t *testing.T) {
+       cases := []struct {
+               tx   *TxData
+               want bool
+       }{{
+               tx: &TxData{
+                       Inputs: []*TxInput{NewIssuanceInput(nil, 0, nil, bc.Hash{}, nil, nil, nil)},
+               },
+               want: false,
+       }, {
+               tx: &TxData{
+                       Inputs: []*TxInput{
+                               NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil),
+                               NewIssuanceInput(nil, 0, nil, bc.Hash{}, nil, nil, nil),
+                       },
+                       Outputs: []*TxOutput{
+                               NewTxOutput(bc.AssetID{}, 0, nil, nil),
+                       },
+               },
+               want: false,
+       }, {
+               tx: &TxData{
+                       Outputs: []*TxOutput{
+                               NewTxOutput(bc.AssetID{}, 0, nil, nil),
+                       },
+               },
+               want: true,
+       }}
+
+       for _, c := range cases {
+               got := c.tx.IsCoinbase()
+               if got != c.want {
+                       t.Errorf("IsCoinbase(%+v) = %v want %v", c.tx, got, c.want)
+               }
+       }
+}
+
 func TestInvalidIssuance(t *testing.T) {
        hex := ("07" + // serflags
                "01" + // transaction version
index 90aad39..981929e 100644 (file)
@@ -52,7 +52,8 @@ func sampleTx() *TxData {
        initialBlockHash := mustDecodeHash("03deff1d4319d67baa10a6d26c1fea9c3e8d30e33474efee1a610a9bb49d758d")
        assetID := bc.ComputeAssetID([]byte{1}, &initialBlockHash, 1, &bc.EmptyStringHash)
        return &TxData{
-               Version: 1,
+               Version:        1,
+               SerializedSize: 66,
                Inputs: []*TxInput{
                        NewSpendInput(nil, mustDecodeHash("dd385f6fe25d91d8c1bd0fa58951ad56b0c5229dcc01f61d9f9e8b9eb92d3292"), assetID, 1000000000000, 1, []byte{1}, bc.Hash{}, []byte("input")),
                        NewSpendInput(nil, bc.NewHash([32]byte{0x11}), assetID, 1, 1, []byte{2}, bc.Hash{}, []byte("input2")),
index bf1fda3..8458833 100644 (file)
@@ -9,15 +9,13 @@ import "io"
 func (Nonce) typ() string { return "nonce1" }
 func (n *Nonce) writeForHash(w io.Writer) {
        mustWriteForHash(w, n.Program)
-       mustWriteForHash(w, n.TimeRangeId)
        mustWriteForHash(w, n.ExtHash)
 }
 
 // NewNonce creates a new Nonce.
-func NewNonce(p *Program, trID *Hash) *Nonce {
+func NewNonce(p *Program) *Nonce {
        return &Nonce{
-               Program:     p,
-               TimeRangeId: trID,
+               Program: p,
        }
 }
 
diff --git a/protocol/bc/timerange.go b/protocol/bc/timerange.go
deleted file mode 100644 (file)
index fc35bd5..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-package bc
-
-import "io"
-
-// TimeRange denotes a time range. It satisfies the Entry interface.
-
-func (TimeRange) typ() string { return "timerange1" }
-func (tr *TimeRange) writeForHash(w io.Writer) {
-       mustWriteForHash(w, tr.MinTimeMs)
-       mustWriteForHash(w, tr.MaxTimeMs)
-       mustWriteForHash(w, tr.ExtHash)
-}
-
-// NewTimeRange creates a new TimeRange.
-func NewTimeRange(minTimeMS, maxTimeMS uint64) *TimeRange {
-       return &TimeRange{
-               MinTimeMs: minTimeMS,
-               MaxTimeMs: maxTimeMS,
-       }
-}
index 1a13919..dd678dc 100644 (file)
@@ -34,18 +34,6 @@ var (
        ErrMissingEntry = errors.New("missing entry")
 )
 
-func (tx *Tx) TimeRange(id Hash) (*TimeRange, error) {
-       e, ok := tx.Entries[id]
-       if !ok || e == nil {
-               return nil, errors.Wrapf(ErrMissingEntry, "id %x", id.Bytes())
-       }
-       tr, ok := e.(*TimeRange)
-       if !ok {
-               return nil, errors.Wrapf(ErrEntryType, "entry %x has unexpected type %T", id.Bytes(), e)
-       }
-       return tr, nil
-}
-
 func (tx *Tx) Output(id Hash) (*Output, error) {
        e, ok := tx.Entries[id]
        if !ok || e == nil {
index 4c87978..d893922 100644 (file)
@@ -18,12 +18,13 @@ func (h *TxHeader) writeForHash(w io.Writer) {
 }
 
 // NewTxHeader creates an new TxHeader.
-func NewTxHeader(version uint64, resultIDs []*Hash, data *Hash, minTimeMS, maxTimeMS uint64) *TxHeader {
+func NewTxHeader(version, serializedSize uint64, resultIDs []*Hash, data *Hash, minTimeMS, maxTimeMS uint64) *TxHeader {
        return &TxHeader{
-               Version:   version,
-               ResultIds: resultIDs,
-               Data:      data,
-               MinTimeMs: minTimeMS,
-               MaxTimeMs: maxTimeMS,
+               Version:        version,
+               SerializedSize: serializedSize,
+               ResultIds:      resultIDs,
+               Data:           data,
+               MinTimeMs:      minTimeMS,
+               MaxTimeMs:      maxTimeMS,
        }
 }
index ec7e070..fcb011e 100644 (file)
@@ -5,14 +5,12 @@ import (
        "fmt"
        "time"
 
-//     "github.com/blockchain/crypto/ed25519"
        "github.com/bytom/errors"
        "github.com/bytom/log"
        "github.com/bytom/protocol/bc"
        "github.com/bytom/protocol/bc/legacy"
        "github.com/bytom/protocol/state"
        "github.com/bytom/protocol/validation"
-//     "github.com/blockchain/protocol/vm/vmutil"
 )
 
 // maxBlockTxs limits the number of transactions
@@ -38,7 +36,7 @@ var (
 
 // GetBlock returns the block at the given height, if there is one,
 // otherwise it returns an error.
-func (c *Chain) GetBlock(ctx context.Context, height uint64) (*legacy.Block, error) {
+func (c *Chain) GetBlock(height uint64) (*legacy.Block, error) {
        return c.store.GetBlock(height)
 }
 
@@ -68,9 +66,7 @@ func (c *Chain) GenerateBlock(ctx context.Context, prev *legacy.Block, snapshot
                        Height:            prev.Height + 1,
                        PreviousBlockHash: prev.Hash(),
                        TimestampMS:       timestampMS,
-                       BlockCommitment: legacy.BlockCommitment{
-                               ConsensusProgram: prev.ConsensusProgram,
-                       },
+                       BlockCommitment:   legacy.BlockCommitment{},
                },
        }
 
@@ -128,13 +124,10 @@ func (c *Chain) GenerateBlock(ctx context.Context, prev *legacy.Block, snapshot
 func (c *Chain) ValidateBlock(block, prev *legacy.Block) error {
        blockEnts := legacy.MapBlock(block)
        prevEnts := legacy.MapBlock(prev)
-       err := validation.ValidateBlock(blockEnts, prevEnts, c.InitialBlockHash, c.ValidateTx)
+       err := validation.ValidateBlock(blockEnts, prevEnts)
        if err != nil {
                return errors.Sub(ErrBadBlock, err)
        }
-       if block.Height > 1 {
-               err = validation.ValidateBlockSig(blockEnts, prevEnts.NextConsensusProgram)
-       }
        return errors.Sub(ErrBadBlock, err)
 }
 
@@ -223,50 +216,23 @@ func (c *Chain) setHeight(h uint64) {
        c.state.cond.Broadcast()
 }
 
-// ValidateBlockForSig performs validation on an incoming _unsigned_
-// block in preparation for signing it. By definition it does not
-// execute the consensus program.
-func (c *Chain) ValidateBlockForSig(ctx context.Context, block *legacy.Block) error {
-       var prev *legacy.Block
-
-       if block.Height > 1 {
-               var err error
-               prev, err = c.GetBlock(ctx, block.Height-1)
-               if err != nil {
-                       return errors.Wrap(err, "getting previous block")
-               }
-       }
-
-       err := validation.ValidateBlock(legacy.MapBlock(block), legacy.MapBlock(prev), c.InitialBlockHash, c.ValidateTx)
-       return errors.Sub(ErrBadBlock, err)
-}
-
-func NewInitialBlock(/*pubkeys []ed25519.PublicKey, nSigs int, timestamp time.Time*/) (*legacy.Block, error) {
+func NewInitialBlock(timestamp time.Time) (*legacy.Block, error) {
        // TODO(kr): move this into a lower-level package (e.g. chain/protocol/bc)
        // so that other packages (e.g. chain/protocol/validation) unit tests can
        // call this function.
-
-/*
-       script, err := vmutil.BlockMultiSigProgram(pubkeys, nSigs)
-       if err != nil {
-               return nil, err
-       }
-
        root, err := bc.MerkleRoot(nil) // calculate the zero value of the tx merkle root
        if err != nil {
                return nil, errors.Wrap(err, "calculating zero value of tx merkle root")
        }
-    */
 
        b := &legacy.Block{
                BlockHeader: legacy.BlockHeader{
                        Version:     1,
                        Height:      1,
-                       //TimestampMS: bc.Millis(timestamp),
-                       /*BlockCommitment: legacy.BlockCommitment{
+                       TimestampMS: bc.Millis(timestamp),
+                       BlockCommitment: legacy.BlockCommitment{
                                TransactionsMerkleRoot: root,
-                               ConsensusProgram:       script,
-                       },*/
+                       },
                },
        }
        return b, nil
index f027851..690dbb5 100644 (file)
@@ -1,18 +1,6 @@
 package protocol
 
-import (
-       "context"
-       "encoding/hex"
-       "testing"
-       "time"
-
-       "github.com/bytom/protocol/bc"
-       "github.com/bytom/protocol/bc/legacy"
-       "github.com/bytom/protocol/prottest/memstore"
-       "github.com/bytom/protocol/state"
-       "github.com/bytom/testutil"
-)
-
+/*
 func TestGetBlock(t *testing.T) {
        ctx := context.Background()
 
@@ -182,7 +170,6 @@ func TestGenerateBlock(t *testing.T) {
                        BlockCommitment: legacy.BlockCommitment{
                                TransactionsMerkleRoot: wantTxRoot,
                                AssetsMerkleRoot:       wantAssetsRoot,
-                               ConsensusProgram:       b1.ConsensusProgram,
                        },
                },
                Transactions: txs,
@@ -193,24 +180,6 @@ func TestGenerateBlock(t *testing.T) {
        }
 }
 
-func TestValidateBlockForSig(t *testing.T) {
-       initialBlock, err := NewInitialBlock(testutil.TestPubs, 1, time.Now())
-       if err != nil {
-               t.Fatal("unexpected error ", err)
-       }
-
-       ctx := context.Background()
-       c, err := NewChain(ctx, initialBlock.Hash(), memstore.New(), nil)
-       if err != nil {
-               t.Fatal("unexpected error ", err)
-       }
-
-       err = c.ValidateBlockForSig(ctx, initialBlock)
-       if err != nil {
-               t.Error("unexpected error ", err)
-       }
-}
-
 // newTestChain returns a new Chain using memstore for storage,
 // along with an initial block b1 (with a 0/0 multisig program).
 // It commits b1 before returning.
@@ -219,7 +188,7 @@ func newTestChain(tb testing.TB, ts time.Time) (c *Chain, b1 *legacy.Block) {
 
        var err error
 
-       b1, err = NewInitialBlock(nil, 0, ts)
+       b1, err = NewInitialBlock(ts)
        if err != nil {
                testutil.FatalErr(tb, err)
        }
@@ -274,4 +243,4 @@ func mustDecodeHash(s string) (h bc.Hash) {
                panic(err)
        }
        return h
-}
+}*/
index c558d41..4d5cba4 100644 (file)
@@ -70,7 +70,7 @@ func NewChain(tb testing.TB, opts ...Option) *protocol.Chain {
        }
 
        ctx := context.Background()
-       b1, err := protocol.NewInitialBlock(conf.pubkeys, conf.quorum, time.Now())
+       b1, err := protocol.NewInitialBlock(time.Now())
        if err != nil {
                testutil.FatalErr(tb, err)
        }
@@ -96,8 +96,7 @@ func NewChain(tb testing.TB, opts ...Option) *protocol.Chain {
 
 // Initial returns the provided Chain's initial block.
 func Initial(tb testing.TB, c *protocol.Chain) *legacy.Block {
-       ctx := context.Background()
-       b1, err := c.GetBlock(ctx, 1)
+       b1, err := c.GetBlock(1)
        if err != nil {
                testutil.FatalErr(tb, err)
        }
@@ -121,7 +120,7 @@ func BlockKeyPairs(c *protocol.Chain) ([]ed25519.PublicKey, []ed25519.PrivateKey
 // it makes an empty block.
 func MakeBlock(tb testing.TB, c *protocol.Chain, txs []*legacy.Tx) *legacy.Block {
        ctx := context.Background()
-       curBlock, err := c.GetBlock(ctx, c.Height())
+       curBlock, err := c.GetBlock(c.Height())
        if err != nil {
                testutil.FatalErr(tb, err)
        }
index 95bbf66..dd13c73 100644 (file)
@@ -1,8 +1,6 @@
 package prottest
 
-import "testing"
-
-func TestMakeBlock(t *testing.T) {
+/*func TestMakeBlock(t *testing.T) {
        c := NewChain(t)
        MakeBlock(t, c, nil)
        MakeBlock(t, c, nil)
@@ -12,4 +10,4 @@ func TestMakeBlock(t *testing.T) {
        if got := c.Height(); got != want {
                t.Errorf("c.Height() = %d want %d", got, want)
        }
-}
+}*/
index 208c401..eef1f8c 100644 (file)
@@ -26,14 +26,15 @@ func New() *MemStore {
        return &MemStore{Blocks: make(map[uint64]*legacy.Block)}
 }
 
-func (m *MemStore) Height(context.Context) (uint64, error) {
+func (m *MemStore) Height() uint64 {
        m.mu.Lock()
        defer m.mu.Unlock()
 
-       return uint64(len(m.Blocks)), nil
+       return uint64(len(m.Blocks))
+
 }
 
-func (m *MemStore) SaveBlock(ctx context.Context, b *legacy.Block) error {
+func (m *MemStore) SaveBlock(b *legacy.Block) error {
        m.mu.Lock()
        defer m.mu.Unlock()
 
@@ -54,7 +55,7 @@ func (m *MemStore) SaveSnapshot(ctx context.Context, height uint64, snapshot *st
        return nil
 }
 
-func (m *MemStore) GetBlock(ctx context.Context, height uint64) (*legacy.Block, error) {
+func (m *MemStore) GetBlock(height uint64) (*legacy.Block, error) {
        m.mu.Lock()
        defer m.mu.Unlock()
        b, ok := m.Blocks[height]
index 52a6a28..ef00b58 100644 (file)
@@ -1,21 +1,8 @@
 package protocol
 
-import (
-       "context"
-       "log"
-       "testing"
-       "time"
-
-       "github.com/bytom/protocol/bc"
-       "github.com/bytom/protocol/bc/legacy"
-       "github.com/bytom/protocol/prottest/memstore"
-       "github.com/bytom/protocol/state"
-       "github.com/bytom/testutil"
-)
-
-func TestRecoverSnapshotNoAdditionalBlocks(t *testing.T) {
+/*func TestRecoverSnapshotNoAdditionalBlocks(t *testing.T) {
        store := memstore.New()
-       b, err := NewInitialBlock(nil, 0, time.Now().Add(-time.Minute))
+       b, err := NewInitialBlock(time.Now().Add(-time.Minute))
        if err != nil {
                testutil.FatalErr(t, err)
        }
@@ -72,8 +59,7 @@ func createEmptyBlock(block *legacy.Block, snapshot *state.Snapshot) *legacy.Blo
                        BlockCommitment: legacy.BlockCommitment{
                                TransactionsMerkleRoot: root,
                                AssetsMerkleRoot:       snapshot.Tree.RootHash(),
-                               ConsensusProgram:       block.ConsensusProgram,
                        },
                },
        }
-}
+}*/
index 59e0859..1d4fa12 100644 (file)
@@ -78,16 +78,7 @@ func (s *Snapshot) ApplyTx(tx *bc.Tx) error {
                        return fmt.Errorf("conflicting nonce %x", n.Bytes())
                }
 
-               nonce, err := tx.Nonce(n)
-               if err != nil {
-                       return errors.Wrap(err, "applying nonce")
-               }
-               tr, err := tx.TimeRange(*nonce.TimeRangeId)
-               if err != nil {
-                       return errors.Wrap(err, "applying nonce")
-               }
-
-               s.Nonces[n] = tr.MaxTimeMs
+               s.Nonces[n] = tx.TxHeader.MaxTimeMs
        }
 
        // Remove spent outputs. Each output must be present.
index 15c2b24..3d8020d 100644 (file)
@@ -24,7 +24,8 @@ func (c *Chain) ValidateTx(tx *bc.Tx) error {
        var ok bool
        err, ok = c.prevalidated.lookup(tx.ID)
        if !ok {
-               err = validation.ValidateTx(tx, c.InitialBlockHash)
+               //TODO: fix the cache level things
+               _, err = validation.ValidateTx(tx, nil)
                c.prevalidated.cache(tx.ID, err)
        }
        return errors.Sub(ErrBadTx, err)
index 2052bc8..803598d 100644 (file)
@@ -1,7 +1,6 @@
 package protocol
 
 import (
-       "context"
        "fmt"
        "testing"
        "time"
@@ -11,13 +10,12 @@ import (
        "github.com/bytom/crypto/ed25519"
        "github.com/bytom/protocol/bc"
        "github.com/bytom/protocol/bc/legacy"
-       "github.com/bytom/protocol/state"
        "github.com/bytom/protocol/vm"
        "github.com/bytom/protocol/vm/vmutil"
        "github.com/bytom/testutil"
 )
 
-func TestBadMaxIssuanceWindow(t *testing.T) {
+/*func TestBadMaxIssuanceWindow(t *testing.T) {
        ctx := context.Background()
        c, b1 := newTestChain(t, time.Now())
        c.MaxIssuanceWindow = time.Second
@@ -31,7 +29,7 @@ func TestBadMaxIssuanceWindow(t *testing.T) {
        if len(got.Transactions) != 0 {
                t.Error("expected issuance past max issuance window to be rejected")
        }
-}
+}*/
 
 type testDest struct {
        privKey ed25519.PrivateKey
index 91f9d13..bf62804 100644 (file)
@@ -2,100 +2,15 @@ package validation
 
 import (
        "testing"
-       "time"
 
        "github.com/bytom/protocol/bc"
        "github.com/bytom/protocol/bc/legacy"
-       "github.com/bytom/protocol/vm"
-       "github.com/bytom/protocol/vm/vmutil"
 )
 
-func TestValidateBlock1(t *testing.T) {
-       b1 := newInitialBlock(t)
-       err := ValidateBlock(b1, nil, b1.ID, dummyValidateTx)
-       if err != nil {
-               t.Errorf("ValidateBlock(%v, nil) = %v, want nil", b1, err)
-       }
-}
-
-func TestValidateBlock1Err(t *testing.T) {
-       b1 := newInitialBlock(t)
-       transactionsRoot := bc.NewHash([32]byte{1})
-       b1.TransactionsRoot = &transactionsRoot // make b1 be invalid
-       err := ValidateBlock(b1, nil, b1.ID, dummyValidateTx)
-       if err == nil {
-               t.Errorf("ValidateBlock(%v, nil) = nil, want error", b1)
-       }
-}
-
-func TestValidateBlock2(t *testing.T) {
-       b1 := newInitialBlock(t)
-       b2 := generate(t, b1)
-       err := ValidateBlock(b2, b1, b2.ID, dummyValidateTx)
-       if err != nil {
-               t.Errorf("ValidateBlock(%v, %v) = %v, want nil", b2, b1, err)
-       }
-}
-
-func TestValidateBlock2Err(t *testing.T) {
-       b1 := newInitialBlock(t)
-       b2 := generate(t, b1)
-       transactionsRoot := bc.NewHash([32]byte{1})
-       b2.TransactionsRoot = &transactionsRoot // make b2 be invalid
-       err := ValidateBlock(b2, b1, b2.ID, dummyValidateTx)
-       if err == nil {
-               t.Errorf("ValidateBlock(%v, %v) = nil, want error", b2, b1)
-       }
-}
-
-func TestValidateBlockSig2(t *testing.T) {
-       b1 := newInitialBlock(t)
-       b2 := generate(t, b1)
-       err := ValidateBlockSig(b2, b1.NextConsensusProgram)
-       if err != nil {
-               t.Errorf("ValidateBlockSig(%v, %v) = %v, want nil", b2, b1, err)
-       }
-}
-
-func TestValidateBlockSig2Err(t *testing.T) {
-       b1 := newInitialBlock(t)
-       b2 := generate(t, b1)
-       prog := []byte{byte(vm.OP_FALSE)} // make b2 be invalid
-       err := ValidateBlockSig(b2, prog)
-       if err == nil {
-               t.Errorf("ValidateBlockSig(%v, %v) = nil, want error", b2, b1)
-       }
-}
-
 func dummyValidateTx(*bc.Tx) error {
        return nil
 }
 
-func newInitialBlock(tb testing.TB) *bc.Block {
-       script, err := vmutil.BlockMultiSigProgram(nil, 0)
-       if err != nil {
-               tb.Fatal(err)
-       }
-
-       root, err := bc.MerkleRoot(nil) // calculate the zero value of the tx merkle root
-       if err != nil {
-               tb.Fatal(err)
-       }
-
-       b := &legacy.Block{
-               BlockHeader: legacy.BlockHeader{
-                       Version:     1,
-                       Height:      1,
-                       TimestampMS: bc.Millis(time.Now()),
-                       BlockCommitment: legacy.BlockCommitment{
-                               TransactionsMerkleRoot: root,
-                               ConsensusProgram:       script,
-                       },
-               },
-       }
-       return legacy.MapBlock(b)
-}
-
 func generate(tb testing.TB, prev *bc.Block) *bc.Block {
        b := &legacy.Block{
                BlockHeader: legacy.BlockHeader{
@@ -103,9 +18,7 @@ func generate(tb testing.TB, prev *bc.Block) *bc.Block {
                        Height:            prev.Height + 1,
                        PreviousBlockHash: prev.ID,
                        TimestampMS:       prev.TimestampMs + 1,
-                       BlockCommitment: legacy.BlockCommitment{
-                               ConsensusProgram: prev.NextConsensusProgram,
-                       },
+                       BlockCommitment:   legacy.BlockCommitment{},
                },
        }
 
diff --git a/protocol/validation/fuzz_test.go b/protocol/validation/fuzz_test.go
deleted file mode 100644 (file)
index b0d6da1..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-package validation
-
-import (
-       "testing"
-
-       "github.com/bytom/protocol/bc"
-       "github.com/bytom/protocol/bc/legacy"
-)
-
-func TestFuzzAssetIdNilPointer(t *testing.T) {
-       const (
-               blockchainID = `50935a092ffad7ec9fbac4f4486db6c3b8cd5b9f51cf697248584dde286a7220`
-               input        = `07300730303030303030000001302b3030303030303030303030303030303030303030303030303030303030303030303030303030303030303000253030303030303030303030303030303030303030303030303030303030303030303030303000`
-       )
-
-       var testBlockchainID bc.Hash
-       err := testBlockchainID.UnmarshalText([]byte(blockchainID))
-       if err != nil {
-               t.Fatal(err)
-       }
-
-       var tx legacy.Tx
-       err = tx.UnmarshalText([]byte(input))
-       if err != nil {
-               t.Fatal(err)
-       }
-
-       ValidateTx(tx.Tx, testBlockchainID)
-}
index d58c11b..55b7740 100644 (file)
@@ -9,11 +9,64 @@ import (
        "github.com/bytom/protocol/vm"
 )
 
+const (
+       defaultGasLimit = int64(80000)
+       muxGasCost      = int64(10)
+       gasRate         = int64(1000)
+
+       maxTxSize    = uint64(1024)
+       maxBlockSzie = uint64(16384)
+)
+
+var BTMAssetID = &bc.AssetID{
+       V0: uint64(18446744073709551615),
+       V1: uint64(18446744073709551615),
+       V2: uint64(18446744073709551615),
+       V3: uint64(18446744073709551615),
+}
+
+type gasState struct {
+       gasLeft  int64
+       gasUsed  int64
+       BTMValue int64
+}
+
+func (g *gasState) setGas(BTMValue int64) error {
+       if BTMValue < 0 {
+               return errGasCalculate
+       }
+       g.BTMValue = BTMValue
+
+       if gasAmount, ok := checked.DivInt64(BTMValue, gasRate); ok {
+               if gasAmount == 0 {
+                       g.gasLeft = muxGasCost
+               } else if gasAmount < defaultGasLimit {
+                       g.gasLeft = gasAmount
+               }
+       } else {
+               return errGasCalculate
+       }
+       return nil
+}
+
+func (g *gasState) updateUsage(gasLeft int64) error {
+       if gasLeft < 0 {
+               return errGasCalculate
+       }
+       if gasUsed, ok := checked.SubInt64(g.gasLeft, gasLeft); ok {
+               g.gasUsed += gasUsed
+               g.gasLeft = gasLeft
+       } else {
+               return errGasCalculate
+       }
+       return nil
+}
+
 // validationState contains the context that must propagate through
 // the transaction graph when validating entries.
 type validationState struct {
        // The ID of the blockchain
-       blockchainID bc.Hash
+       block *bc.Block
 
        // The enclosing transaction object
        tx *bc.Tx
@@ -29,31 +82,36 @@ type validationState struct {
 
        // Memoized per-entry validation results
        cache map[bc.Hash]error
+
+       gas *gasState
 }
 
 var (
-       errBadTimeRange          = errors.New("bad time range")
-       errEmptyResults          = errors.New("transaction has no results")
-       errMismatchedAssetID     = errors.New("mismatched asset id")
-       errMismatchedBlock       = errors.New("mismatched block")
-       errMismatchedMerkleRoot  = errors.New("mismatched merkle root")
-       errMismatchedPosition    = errors.New("mismatched value source/dest positions")
-       errMismatchedReference   = errors.New("mismatched reference")
-       errMismatchedValue       = errors.New("mismatched value")
-       errMisorderedBlockHeight = errors.New("misordered block height")
-       errMisorderedBlockTime   = errors.New("misordered block time")
-       errMissingField          = errors.New("missing required field")
-       errNoPrevBlock           = errors.New("no previous block")
-       errNoSource              = errors.New("no source for value")
-       errNonemptyExtHash       = errors.New("non-empty extension hash")
-       errOverflow              = errors.New("arithmetic overflow/underflow")
-       errPosition              = errors.New("invalid source or destination position")
-       errTxVersion             = errors.New("invalid transaction version")
-       errUnbalanced            = errors.New("unbalanced")
-       errUntimelyTransaction   = errors.New("block timestamp outside transaction time range")
-       errVersionRegression     = errors.New("version regression")
-       errWrongBlockchain       = errors.New("wrong blockchain")
-       errZeroTime              = errors.New("timerange has one or two bounds set to zero")
+       errGasCalculate             = errors.New("gas usage calculate got a math error")
+       errEmptyResults             = errors.New("transaction has no results")
+       errMismatchedAssetID        = errors.New("mismatched asset id")
+       errMismatchedBlock          = errors.New("mismatched block")
+       errMismatchedMerkleRoot     = errors.New("mismatched merkle root")
+       errMismatchedPosition       = errors.New("mismatched value source/dest positions")
+       errMismatchedReference      = errors.New("mismatched reference")
+       errMismatchedValue          = errors.New("mismatched value")
+       errMisorderedBlockHeight    = errors.New("misordered block height")
+       errMisorderedBlockTime      = errors.New("misordered block time")
+       errMissingField             = errors.New("missing required field")
+       errNoGas                    = errors.New("no gas input")
+       errNoPrevBlock              = errors.New("no previous block")
+       errNoSource                 = errors.New("no source for value")
+       errNonemptyExtHash          = errors.New("non-empty extension hash")
+       errOverflow                 = errors.New("arithmetic overflow/underflow")
+       errPosition                 = errors.New("invalid source or destination position")
+       errTxVersion                = errors.New("invalid transaction version")
+       errUnbalanced               = errors.New("unbalanced")
+       errUntimelyTransaction      = errors.New("block timestamp outside transaction time range")
+       errVersionRegression        = errors.New("version regression")
+       errWrongBlockSize           = errors.New("block size is too big")
+       errWrongTransactionSize     = errors.New("transaction size is too big")
+       errWrongCoinbaseTransaction = errors.New("wrong coinbase transaction")
+       errWrongCoinbaseAsset       = errors.New("wrong coinbase asset id")
 )
 
 func checkValid(vs *validationState, e bc.Entry) (err error) {
@@ -69,13 +127,6 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
 
        switch e := e.(type) {
        case *bc.TxHeader:
-               // This does only part of the work of validating a tx header. The
-               // block-related parts of tx validation are in ValidateBlock.
-               if e.MaxTimeMs > 0 {
-                       if e.MaxTimeMs < e.MinTimeMs {
-                               return errors.WithDetailf(errBadTimeRange, "min time %d, max time %d", e.MinTimeMs, e.MaxTimeMs)
-                       }
-               }
 
                for i, resID := range e.ResultIds {
                        resultEntry := vs.tx.Entries[*resID]
@@ -97,29 +148,23 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
                        }
                }
 
-       case *bc.Mux:
-               err = vm.Verify(NewTxVMContext(vs.tx, e, e.Program, e.WitnessArguments))
-               if err != nil {
-                       return errors.Wrap(err, "checking mux program")
+       case *bc.Coinbase:
+               if vs.block == nil || len(vs.block.Transactions) == 0 || vs.block.Transactions[0] != vs.tx {
+                       return errWrongCoinbaseTransaction
                }
 
-               for i, src := range e.Sources {
-                       vs2 := *vs
-                       vs2.sourcePos = uint64(i)
-                       err = checkValidSrc(&vs2, src)
-                       if err != nil {
-                               return errors.Wrapf(err, "checking mux source %d", i)
-                       }
+               if *e.WitnessDestination.Value.AssetId != *BTMAssetID {
+                       return errWrongCoinbaseAsset
                }
-               for i, dest := range e.WitnessDestinations {
-                       vs2 := *vs
-                       vs2.destPos = uint64(i)
-                       err = checkValidDest(&vs2, dest)
-                       if err != nil {
-                               return errors.Wrapf(err, "checking mux destination %d", i)
-                       }
+
+               vs2 := *vs
+               vs2.destPos = 0
+               err = checkValidDest(&vs2, e.WitnessDestination)
+               if err != nil {
+                       return errors.Wrap(err, "checking coinbase destination")
                }
 
+       case *bc.Mux:
                parity := make(map[bc.AssetID]int64)
                for i, src := range e.Sources {
                        sum, ok := checked.AddInt64(parity[*src.Value.AssetId], int64(src.Value.Amount))
@@ -142,34 +187,57 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
                        parity[*dest.Value.AssetId] = diff
                }
 
+               if amount, ok := parity[*BTMAssetID]; ok {
+                       if err = vs.gas.setGas(amount); err != nil {
+                               return err
+                       }
+               } else {
+                       vs.gas.setGas(0)
+               }
+
                for assetID, amount := range parity {
-                       if amount != 0 {
+                       if amount != 0 && assetID != *BTMAssetID {
                                return errors.WithDetailf(errUnbalanced, "asset %x sources - destinations = %d (should be 0)", assetID.Bytes(), amount)
                        }
                }
 
+               gasLeft, err := vm.Verify(NewTxVMContext(vs, e, e.Program, e.WitnessArguments), vs.gas.gasLeft)
+               if err != nil {
+                       return errors.Wrap(err, "checking mux program")
+               }
+               if err = vs.gas.updateUsage(gasLeft); err != nil {
+                       return err
+               }
+
+               for i, src := range e.Sources {
+                       vs2 := *vs
+                       vs2.sourcePos = uint64(i)
+                       err = checkValidSrc(&vs2, src)
+                       if err != nil {
+                               return errors.Wrapf(err, "checking mux source %d", i)
+                       }
+               }
+               for i, dest := range e.WitnessDestinations {
+                       vs2 := *vs
+                       vs2.destPos = uint64(i)
+                       err = checkValidDest(&vs2, dest)
+                       if err != nil {
+                               return errors.Wrapf(err, "checking mux destination %d", i)
+                       }
+               }
+
                if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
                        return errNonemptyExtHash
                }
 
        case *bc.Nonce:
-               err = vm.Verify(NewTxVMContext(vs.tx, e, e.Program, e.WitnessArguments))
+               //TODO: add block heigh range check on the control program
+               gasLeft, err := vm.Verify(NewTxVMContext(vs, e, e.Program, e.WitnessArguments), vs.gas.gasLeft)
                if err != nil {
                        return errors.Wrap(err, "checking nonce program")
                }
-               tr, err := vs.tx.TimeRange(*e.TimeRangeId)
-               if err != nil {
-                       return errors.Wrap(err, "getting nonce timerange")
-               }
-               vs2 := *vs
-               vs2.entryID = *e.TimeRangeId
-               err = checkValid(&vs2, tr)
-               if err != nil {
-                       return errors.Wrap(err, "checking nonce timerange")
-               }
-
-               if tr.MinTimeMs == 0 || tr.MaxTimeMs == 0 {
-                       return errZeroTime
+               if err = vs.gas.updateUsage(gasLeft); err != nil {
+                       return err
                }
 
                if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
@@ -200,22 +268,7 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
                        return errNonemptyExtHash
                }
 
-       case *bc.TimeRange:
-               if e.MinTimeMs > vs.tx.MinTimeMs {
-                       return errBadTimeRange
-               }
-               if e.MaxTimeMs > 0 && e.MaxTimeMs < vs.tx.MaxTimeMs {
-                       return errBadTimeRange
-               }
-               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
-                       return errNonemptyExtHash
-               }
-
        case *bc.Issuance:
-               if *e.WitnessAssetDefinition.InitialBlockId != vs.blockchainID {
-                       return errors.WithDetailf(errWrongBlockchain, "current blockchain %x, asset defined on blockchain %x", vs.blockchainID.Bytes(), e.WitnessAssetDefinition.InitialBlockId.Bytes())
-               }
-
                computedAssetID := e.WitnessAssetDefinition.ComputeAssetID()
                if computedAssetID != *e.Value.AssetId {
                        return errors.WithDetailf(errMismatchedAssetID, "asset ID is %x, issuance wants %x", computedAssetID.Bytes(), e.Value.AssetId.Bytes())
@@ -226,10 +279,13 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
                        return errors.Wrapf(bc.ErrMissingEntry, "entry for issuance anchor %x not found", e.AnchorId.Bytes())
                }
 
-               err = vm.Verify(NewTxVMContext(vs.tx, e, e.WitnessAssetDefinition.IssuanceProgram, e.WitnessArguments))
+               gasLeft, err := vm.Verify(NewTxVMContext(vs, e, e.WitnessAssetDefinition.IssuanceProgram, e.WitnessArguments), vs.gas.gasLeft)
                if err != nil {
                        return errors.Wrap(err, "checking issuance program")
                }
+               if err = vs.gas.updateUsage(gasLeft); err != nil {
+                       return err
+               }
 
                var anchored *bc.Hash
                switch a := anchor.(type) {
@@ -276,10 +332,13 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
                if err != nil {
                        return errors.Wrap(err, "getting spend prevout")
                }
-               err = vm.Verify(NewTxVMContext(vs.tx, e, spentOutput.ControlProgram, e.WitnessArguments))
+               gasLeft, err := vm.Verify(NewTxVMContext(vs, e, spentOutput.ControlProgram, e.WitnessArguments), vs.gas.gasLeft)
                if err != nil {
                        return errors.Wrap(err, "checking control program")
                }
+               if err = vs.gas.updateUsage(gasLeft); err != nil {
+                       return err
+               }
 
                eq, err := spentOutput.Source.Value.Equal(e.WitnessDestination.Value)
                if err != nil {
@@ -314,13 +373,6 @@ func checkValid(vs *validationState, e bc.Entry) (err error) {
        return nil
 }
 
-func checkValidBlockHeader(bh *bc.BlockHeader) error {
-       if bh.Version == 1 && bh.ExtHash != nil && !bh.ExtHash.IsZero() {
-               return errNonemptyExtHash
-       }
-       return nil
-}
-
 func checkValidSrc(vstate *validationState, vs *bc.ValueSource) error {
        if vs == nil {
                return errors.Wrap(errMissingField, "empty value source")
@@ -345,6 +397,11 @@ func checkValidSrc(vstate *validationState, vs *bc.ValueSource) error {
 
        var dest *bc.ValueDestination
        switch ref := e.(type) {
+       case *bc.Coinbase:
+               if vs.Position != 0 {
+                       return errors.Wrapf(errPosition, "invalid position %d for coinbase source", vs.Position)
+               }
+               dest = ref.WitnessDestination
        case *bc.Issuance:
                if vs.Position != 0 {
                        return errors.Wrapf(errPosition, "invalid position %d for issuance source", vs.Position)
@@ -364,7 +421,7 @@ func checkValidSrc(vstate *validationState, vs *bc.ValueSource) error {
                dest = ref.WitnessDestinations[vs.Position]
 
        default:
-               return errors.Wrapf(bc.ErrEntryType, "value source is %T, should be issuance, spend, or mux", e)
+               return errors.Wrapf(bc.ErrEntryType, "value source is %T, should be coinbase, issuance, spend, or mux", e)
        }
 
        if dest.Ref == nil || *dest.Ref != vstate.entryID {
@@ -444,16 +501,9 @@ func checkValidDest(vs *validationState, vd *bc.ValueDestination) error {
        return nil
 }
 
-// ValidateBlockSig runs the consensus program prog on b.
-func ValidateBlockSig(b *bc.Block, prog []byte) error {
-       vmContext := newBlockVMContext(b, prog, b.WitnessArguments)
-       err := vm.Verify(vmContext)
-       return errors.Wrap(err, "evaluating previous block's next consensus program")
-}
-
 // ValidateBlock validates a block and the transactions within.
 // It does not run the consensus program; for that, see ValidateBlockSig.
-func ValidateBlock(b, prev *bc.Block, initialBlockID bc.Hash, validateTx func(*bc.Tx) error) error {
+func ValidateBlock(b, prev *bc.Block) error {
        if b.Height > 1 {
                if prev == nil {
                        return errors.WithDetailf(errNoPrevBlock, "height %d", b.Height)
@@ -464,11 +514,11 @@ func ValidateBlock(b, prev *bc.Block, initialBlockID bc.Hash, validateTx func(*b
                }
        }
 
-       err := checkValidBlockHeader(b.BlockHeader)
-       if err != nil {
-               return errors.Wrap(err, "checking block header")
+       if b.BlockHeader.SerializedSize > maxBlockSzie {
+               return errWrongBlockSize
        }
 
+       coinbaseValue := b.BlockHeader.BlockSubsidy()
        for i, tx := range b.Transactions {
                if b.Version == 1 && tx.Version != 1 {
                        return errors.WithDetailf(errTxVersion, "block version %d, transaction version %d", b.Version, tx.Version)
@@ -480,10 +530,22 @@ func ValidateBlock(b, prev *bc.Block, initialBlockID bc.Hash, validateTx func(*b
                        return errors.WithDetailf(errUntimelyTransaction, "block timestamp %d, transaction time range %d-%d", b.TimestampMs, tx.MinTimeMs, tx.MaxTimeMs)
                }
 
-               err = validateTx(tx)
+               txBTMValue, err := ValidateTx(tx, b)
                if err != nil {
                        return errors.Wrapf(err, "validity of transaction %d of %d", i, len(b.Transactions))
                }
+               coinbaseValue += uint64(txBTMValue)
+       }
+
+       // check the coinbase output entry value
+       cbTx := b.Transactions[0]
+       cbOutput := cbTx.Entries[*cbTx.TxHeader.ResultIds[0]]
+       if cbOutput, ok := cbOutput.(*bc.Output); ok {
+               if cbOutput.Source.Value.Amount != coinbaseValue {
+                       return errWrongCoinbaseTransaction
+               }
+       } else {
+               return errWrongCoinbaseTransaction
        }
 
        txRoot, err := bc.MerkleRoot(b.Transactions)
@@ -505,6 +567,7 @@ func validateBlockAgainstPrev(b, prev *bc.Block) error {
        if b.Height != prev.Height+1 {
                return errors.WithDetailf(errMisorderedBlockHeight, "previous block height %d, current block height %d", prev.Height, b.Height)
        }
+
        if prev.ID != *b.PreviousBlockId {
                return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", prev.ID.Bytes(), b.PreviousBlockId.Bytes())
        }
@@ -515,13 +578,22 @@ func validateBlockAgainstPrev(b, prev *bc.Block) error {
 }
 
 // ValidateTx validates a transaction.
-func ValidateTx(tx *bc.Tx, initialBlockID bc.Hash) error {
-       vs := &validationState{
-               blockchainID: initialBlockID,
-               tx:           tx,
-               entryID:      tx.ID,
+func ValidateTx(tx *bc.Tx, block *bc.Block) (int64, error) {
+       if tx.TxHeader.SerializedSize > maxTxSize {
+               return 0, errWrongTransactionSize
+       }
 
+       //TODO: handle the gas limit
+       vs := &validationState{
+               block:   block,
+               tx:      tx,
+               entryID: tx.ID,
+               gas: &gasState{
+                       gasLeft: defaultGasLimit,
+               },
                cache: make(map[bc.Hash]error),
        }
-       return checkValid(vs, tx.TxHeader)
+
+       err := checkValid(vs, tx.TxHeader)
+       return vs.gas.BTMValue, err
 }
index f8f99ea..4eb91cc 100644 (file)
@@ -9,8 +9,8 @@ import (
        "github.com/bytom/crypto/sha3pool"
        "github.com/bytom/errors"
        "github.com/bytom/protocol/bc"
-       "github.com/bytom/protocol/bc/bctest"
        "github.com/bytom/protocol/bc/legacy"
+       "github.com/bytom/protocol/validation"
        "github.com/bytom/protocol/vm"
        "github.com/bytom/testutil"
 
@@ -22,6 +22,106 @@ func init() {
        spew.Config.DisableMethods = true
 }
 
+func TestGasStatus(t *testing.T) {
+       cases := []struct {
+               input  *gasState
+               output *gasState
+               f      func(*gasState) error
+               err    error
+       }{
+               {
+                       input: &gasState{
+                               gasLeft:  10000,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       output: &gasState{
+                               gasLeft:  10000 / gasRate,
+                               gasUsed:  0,
+                               BTMValue: 10000,
+                       },
+                       f: func(input *gasState) error {
+                               return input.setGas(10000)
+                       },
+                       err: nil,
+               },
+               {
+                       input: &gasState{
+                               gasLeft:  10000,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       output: &gasState{
+                               gasLeft:  10000,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       f: func(input *gasState) error {
+                               return input.setGas(-10000)
+                       },
+                       err: errGasCalculate,
+               },
+               {
+                       input: &gasState{
+                               gasLeft:  defaultGasLimit,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       output: &gasState{
+                               gasLeft:  defaultGasLimit,
+                               gasUsed:  0,
+                               BTMValue: 80000000000,
+                       },
+                       f: func(input *gasState) error {
+                               return input.setGas(80000000000)
+                       },
+                       err: nil,
+               },
+               {
+                       input: &gasState{
+                               gasLeft:  10000,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       output: &gasState{
+                               gasLeft:  10000,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       f: func(input *gasState) error {
+                               return input.updateUsage(-1)
+                       },
+                       err: errGasCalculate,
+               },
+               {
+                       input: &gasState{
+                               gasLeft:  10000,
+                               gasUsed:  0,
+                               BTMValue: 0,
+                       },
+                       output: &gasState{
+                               gasLeft:  9999,
+                               gasUsed:  1,
+                               BTMValue: 0,
+                       },
+                       f: func(input *gasState) error {
+                               return input.updateUsage(9999)
+                       },
+                       err: nil,
+               },
+       }
+
+       for _, c := range cases {
+               err := c.f(c.input)
+
+               if err != c.err {
+                       t.Errorf("got error %s, want %s", err, c.err)
+               } else if *c.input != *c.output {
+                       t.Errorf("got gasStatus %s, want %s;", c.input, c.output)
+               }
+       }
+}
+
 func TestTxValidation(t *testing.T) {
        var (
                tx      *bc.Tx
@@ -128,46 +228,6 @@ func TestTxValidation(t *testing.T) {
                        },
                },
                {
-                       desc: "nonce timerange misordered",
-                       f: func() {
-                               iss := txIssuance(t, tx, 0)
-                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
-                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
-                               tr.MinTimeMs = tr.MaxTimeMs + 1
-                       },
-                       err: errBadTimeRange,
-               },
-               {
-                       desc: "nonce timerange disagrees with tx timerange",
-                       f: func() {
-                               iss := txIssuance(t, tx, 0)
-                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
-                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
-                               tr.MaxTimeMs = tx.MaxTimeMs - 1
-                       },
-                       err: errBadTimeRange,
-               },
-               {
-                       desc: "nonce timerange exthash nonempty",
-                       f: func() {
-                               iss := txIssuance(t, tx, 0)
-                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
-                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
-                               tr.ExtHash = newHash(1)
-                       },
-                       err: errNonemptyExtHash,
-               },
-               {
-                       desc: "nonce timerange exthash nonempty, but that's OK",
-                       f: func() {
-                               tx.Version = 2
-                               iss := txIssuance(t, tx, 0)
-                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
-                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
-                               tr.ExtHash = newHash(1)
-                       },
-               },
-               {
                        desc: "mismatched output source / mux dest position",
                        f: func() {
                                tx.Entries[*tx.ResultIds[0]].(*bc.Output).Source.Position = 1
@@ -226,13 +286,6 @@ func TestTxValidation(t *testing.T) {
                        },
                },
                {
-                       desc: "misordered tx time range",
-                       f: func() {
-                               tx.MinTimeMs = tx.MaxTimeMs + 1
-                       },
-                       err: errBadTimeRange,
-               },
-               {
                        desc: "empty tx results",
                        f: func() {
                                tx.ResultIds = nil
@@ -261,21 +314,6 @@ func TestTxValidation(t *testing.T) {
                        },
                },
                {
-                       desc: "wrong blockchain",
-                       f: func() {
-                               vs.blockchainID = *newHash(2)
-                       },
-                       err: errWrongBlockchain,
-               },
-               {
-                       desc: "issuance asset ID mismatch",
-                       f: func() {
-                               iss := txIssuance(t, tx, 0)
-                               iss.Value.AssetId = newAssetID(1)
-                       },
-                       err: errMismatchedAssetID,
-               },
-               {
                        desc: "issuance program failure",
                        f: func() {
                                iss := txIssuance(t, tx, 0)
@@ -342,10 +380,14 @@ func TestTxValidation(t *testing.T) {
                        fixture = sample(t, nil)
                        tx = legacy.NewTx(*fixture.tx).Tx
                        vs = &validationState{
-                               blockchainID: fixture.initialBlockID,
-                               tx:           tx,
-                               entryID:      tx.ID,
-                               cache:        make(map[bc.Hash]error),
+                               block:   mockBlock(),
+                               tx:      tx,
+                               entryID: tx.ID,
+                               gas: &gasState{
+                                       gasLeft: int64(80000),
+                                       gasUsed: 0,
+                               },
+                               cache: make(map[bc.Hash]error),
                        }
                        out := tx.Entries[*tx.ResultIds[0]].(*bc.Output)
                        muxID := out.Source.Ref
@@ -355,6 +397,7 @@ func TestTxValidation(t *testing.T) {
                                c.f()
                        }
                        err := checkValid(vs, tx.TxHeader)
+
                        if rootErr(err) != c.err {
                                t.Errorf("got error %s, want %s; validationState is:\n%s", err, c.err, spew.Sdump(vs))
                        }
@@ -362,20 +405,116 @@ func TestTxValidation(t *testing.T) {
        }
 }
 
-func TestNoncelessIssuance(t *testing.T) {
-       tx := bctest.NewIssuanceTx(t, bc.EmptyStringHash, func(tx *legacy.Tx) {
-               // Remove the issuance nonce.
-               tx.Inputs[0].TypedInput.(*legacy.IssuanceInput).Nonce = nil
+func TestValidateBlock(t *testing.T) {
+       cases := []struct {
+               block *bc.Block
+               err   error
+       }{
+               {
+                       block: &bc.Block{
+                               BlockHeader: &bc.BlockHeader{
+                                       Height: 1,
+                               },
+                               Transactions: []*bc.Tx{mockCoinbaseTx(624000000000)},
+                       },
+                       err: nil,
+               },
+               {
+                       block: &bc.Block{
+                               BlockHeader: &bc.BlockHeader{
+                                       Height: 1,
+                               },
+                               Transactions: []*bc.Tx{mockCoinbaseTx(1)},
+                       },
+                       err: errWrongCoinbaseTransaction,
+               },
+               {
+                       block: &bc.Block{
+                               BlockHeader: &bc.BlockHeader{
+                                       Height:         1,
+                                       SerializedSize: 88888888,
+                               },
+                               Transactions: []*bc.Tx{mockCoinbaseTx(1)},
+                       },
+                       err: errWrongBlockSize,
+               },
+       }
+
+       for _, c := range cases {
+               txRoot, err := bc.MerkleRoot(c.block.Transactions)
+               if err != nil {
+                       t.Errorf("computing transaction merkle root", err)
+                       continue
+               }
+               c.block.TransactionsRoot = &txRoot
+               err = ValidateBlock(c.block, nil)
+
+               if rootErr(err) != c.err {
+                       t.Errorf("got error %s, want %s", err, c.err)
+               }
+       }
+}
+
+func TestCoinbase(t *testing.T) {
+       CbTx := mockCoinbaseTx(5000000000)
+       errCbTx := legacy.MapTx(&legacy.TxData{
+               Outputs: []*legacy.TxOutput{
+                       legacy.NewTxOutput(bc.AssetID{
+                               V0: uint64(18446744073709551611),
+                               V1: uint64(18446744073709551615),
+                               V2: uint64(18446744073709551615),
+                               V3: uint64(18446744073709551615),
+                       }, 800000000000, []byte{1}, nil),
+               },
        })
+       cases := []struct {
+               block *bc.Block
+               tx    *bc.Tx
+               err   error
+       }{
+               {
+                       block: &bc.Block{
+                               BlockHeader: &bc.BlockHeader{
+                                       Height: 666,
+                               },
+                               Transactions: []*bc.Tx{errCbTx},
+                       },
+                       tx:  CbTx,
+                       err: errWrongCoinbaseTransaction,
+               },
+               {
+                       block: &bc.Block{
+                               BlockHeader: &bc.BlockHeader{
+                                       Height: 666,
+                               },
+                               Transactions: []*bc.Tx{CbTx},
+                       },
+                       tx:  CbTx,
+                       err: nil,
+               },
+               {
+                       block: &bc.Block{
+                               BlockHeader: &bc.BlockHeader{
+                                       Height: 666,
+                               },
+                               Transactions: []*bc.Tx{errCbTx},
+                       },
+                       tx:  errCbTx,
+                       err: errWrongCoinbaseAsset,
+               },
+       }
+
+       for _, c := range cases {
+               _, err := ValidateTx(c.tx, c.block)
 
-       err := ValidateTx(legacy.MapTx(&tx.TxData), bc.EmptyStringHash)
-       if errors.Root(err) != bc.ErrMissingEntry {
-               t.Fatalf("got %s, want %s", err, bc.ErrMissingEntry)
+               if rootErr(err) != c.err {
+                       t.Errorf("got error %s, want %s", err, c.err)
+               }
        }
 }
 
 func TestBlockHeaderValid(t *testing.T) {
-       base := bc.NewBlockHeader(1, 1, &bc.Hash{}, 1, &bc.Hash{}, &bc.Hash{}, nil)
+       base := bc.NewBlockHeader(1, 1, &bc.Hash{}, 1, &bc.Hash{}, &bc.Hash{}, 0, 0)
        baseBytes, _ := proto.Marshal(base)
 
        var bh bc.BlockHeader
@@ -390,12 +529,6 @@ func TestBlockHeaderValid(t *testing.T) {
                                bh.Version = 2
                        },
                },
-               {
-                       f: func() {
-                               bh.ExtHash = newHash(1)
-                       },
-                       err: errNonemptyExtHash,
-               },
        }
 
        for i, c := range cases {
@@ -404,10 +537,6 @@ func TestBlockHeaderValid(t *testing.T) {
                        if c.f != nil {
                                c.f()
                        }
-                       err := checkValidBlockHeader(&bh)
-                       if err != c.err {
-                               t.Errorf("got error %s, want %s; bh is:\n%s", err, c.err, spew.Sdump(bh))
-                       }
                })
        }
 }
@@ -498,6 +627,9 @@ func sample(tb testing.TB, in *txFixture) *txFixture {
                        legacy.NewSpendInput(args2, *newHash(8), result.assetID, 40, 0, cp2, *newHash(9), []byte{10}),
                }
        }
+
+       result.txInputs = append(result.txInputs, mockGasTxInput())
+
        if len(result.txOutputs) == 0 {
                cp1, err := vm.Assemble("ADD 17 NUMEQUAL")
                if err != nil {
@@ -535,6 +667,26 @@ func sample(tb testing.TB, in *txFixture) *txFixture {
        return &result
 }
 
+func mockBlock() *bc.Block {
+       return &bc.Block{
+               BlockHeader: &bc.BlockHeader{
+                       Height: 666,
+               },
+       }
+}
+
+func mockCoinbaseTx(amount uint64) *bc.Tx {
+       return legacy.MapTx(&legacy.TxData{
+               Outputs: []*legacy.TxOutput{
+                       legacy.NewTxOutput(*BTMAssetID, amount, []byte{1}, nil),
+               },
+       })
+}
+
+func mockGasTxInput() *legacy.TxInput {
+       return legacy.NewSpendInput([][]byte{}, *newHash(8), *validation.BTMAssetID, 100000000, 0, []byte{byte(vm.OP_TRUE)}, *newHash(9), []byte{})
+}
+
 // Like errors.Root, but also unwraps vm.Error objects.
 func rootErr(e error) error {
        for {
index f61d1b3..096b477 100644 (file)
@@ -9,21 +9,10 @@ import (
        "github.com/bytom/protocol/vm"
 )
 
-func newBlockVMContext(block *bc.Block, prog []byte, args [][]byte) *vm.Context {
-       blockHash := block.ID.Bytes()
-       return &vm.Context{
-               VMVersion: 1,
-               Code:      prog,
-               Arguments: args,
-
-               BlockHash:            &blockHash,
-               BlockTimeMS:          &block.TimestampMs,
-               NextConsensusProgram: &block.NextConsensusProgram,
-       }
-}
-
-func NewTxVMContext(tx *bc.Tx, entry bc.Entry, prog *bc.Program, args [][]byte) *vm.Context {
+func NewTxVMContext(vs *validationState, entry bc.Entry, prog *bc.Program, args [][]byte) *vm.Context {
        var (
+               tx         = vs.tx
+               blockHeigh = vs.block.BlockHeader.GetHeight()
                numResults = uint64(len(tx.ResultIds))
                txData     = tx.Data.Bytes()
                entryID    = bc.EntryID(entry) // TODO(bobg): pass this in, don't recompute it
@@ -104,7 +93,8 @@ func NewTxVMContext(tx *bc.Tx, entry bc.Entry, prog *bc.Program, args [][]byte)
 
                EntryID: entryID.Bytes(),
 
-               TxVersion: &tx.Version,
+               TxVersion:  &tx.Version,
+               BlockHeigh: &blockHeigh,
 
                TxSigHash:     txSigHashFn,
                NumResults:    &numResults,
index c56663e..a56a511 100644 (file)
@@ -18,13 +18,8 @@ type Context struct {
 
        // TxVersion must be present when verifying transaction components
        // (such as spends and issuances).
-       TxVersion *uint64
-
-       // These fields must be present when verifying block headers.
-
-       BlockHash            *[]byte
-       BlockTimeMS          *uint64
-       NextConsensusProgram *[]byte
+       TxVersion  *uint64
+       BlockHeigh *uint64
 
        // Fields below this point are required by particular opcodes when
        // verifying transaction components.
index 68b5685..5aefcb4 100644 (file)
@@ -136,14 +136,3 @@ func opTxSigHash(vm *virtualMachine) error {
        }
        return vm.push(vm.context.TxSigHash(), false)
 }
-
-func opBlockHash(vm *virtualMachine) error {
-       err := vm.applyCost(1)
-       if err != nil {
-               return err
-       }
-       if vm.context.BlockHash == nil {
-               return ErrContext
-       }
-       return vm.push(*vm.context.BlockHash, false)
-}
index affa271..812e577 100644 (file)
@@ -6,10 +6,6 @@ import (
        "github.com/bytom/testutil"
 )
 
-var emptyBlockVMContext = &Context{
-       BlockHash: &[]uint8{0xf0, 0x85, 0x4f, 0x88, 0xb4, 0x89, 0x0, 0x99, 0x2f, 0xec, 0x40, 0x43, 0xf9, 0x65, 0xfa, 0x2, 0x9d, 0xeb, 0x8a, 0xd6, 0x93, 0xcf, 0x37, 0x11, 0xfe, 0x83, 0x9, 0xb3, 0x90, 0x6a, 0x5a, 0x86},
-}
-
 func TestCheckSig(t *testing.T) {
        cases := []struct {
                prog    string
@@ -399,29 +395,6 @@ func TestCryptoOps(t *testing.T) {
                        context:  &Context{},
                },
                wantErr: ErrRunLimitExceeded,
-       }, {
-               op: OP_BLOCKHASH,
-               startVM: &virtualMachine{
-                       runLimit: 50000,
-                       context:  emptyBlockVMContext,
-               },
-               wantVM: &virtualMachine{
-                       runLimit: 49959,
-                       dataStack: [][]byte{{
-                               240, 133, 79, 136, 180, 137, 0, 153,
-                               47, 236, 64, 67, 249, 101, 250, 2,
-                               157, 235, 138, 214, 147, 207, 55, 17,
-                               254, 131, 9, 179, 144, 106, 90, 134,
-                       }},
-                       context: emptyBlockVMContext,
-               },
-       }, {
-               op: OP_BLOCKHASH,
-               startVM: &virtualMachine{
-                       runLimit: 0,
-                       context:  emptyBlockVMContext,
-               },
-               wantErr: ErrRunLimitExceeded,
        }}
 
        hashOps := []Op{OP_SHA256, OP_SHA3}
diff --git a/protocol/vm/doc.go b/protocol/vm/doc.go
deleted file mode 100644 (file)
index 78b3940..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Package vm implements the VM described in Chain Protocol 1.
-
-The VM is for verifying transaction inputs and blocks. Accordingly
-there are two main entrypoints: VerifyTxInput and VerifyBlockHeader,
-both in vm.go. Each constructs a disposable VM object to perform its
-computation.
-
-For VerifyTxInput, the program to execute comes from the input
-commitment: either the prevout's control program, if it's a spend
-input; or the issuance program, if it's an issuance. For
-VerifyBlockHeader, the program to execute is the previous block's
-consensus program.  In all cases, the VM's data stack is first
-populated with witness data from the current object (transaction input
-or block).
-
-The program is interpreted byte-by-byte by the main loop in
-virtualMachine.run(). Most bytes are opcodes in one of the following categories:
-  - bitwise
-  - control
-  - crypto
-  - introspection
-  - numeric
-  - pushdata
-  - splice
-  - stack
-Each category has a corresponding .go file implementing those opcodes.
-
-Each instruction incurs some cost when executed. These costs are
-deducted from (and in some cases refunded to) a predefined run
-limit. Costs are tallied in two conceptual phases: "before" the
-instruction runs and "after." In practice, "before" charges are
-applied on the fly in the body of each opcode's implementation, and
-"after" charges are deferred until the instruction finishes, at which
-point the VM main loop applies the deferred charges. As such,
-functions that have associated costs (chiefly stack pushing and
-popping) include a "deferred" flag as an argument.
-*/
-package vm
index d70c03b..54def8e 100644 (file)
@@ -9,6 +9,7 @@ var (
        ErrDataStackUnderflow = errors.New("data stack underflow")
        ErrDisallowedOpcode   = errors.New("disallowed opcode")
        ErrDivZero            = errors.New("division by zero")
+       ErrFalseVMResult      = errors.New("false VM result")
        ErrLongProgram        = errors.New("program size exceeds maxint32")
        ErrRange              = errors.New("range error")
        ErrReturn             = errors.New("RETURN executed")
index d1a525c..02580b1 100644 (file)
@@ -184,26 +184,14 @@ func opNonce(vm *virtualMachine) error {
        return vm.push(*vm.context.AnchorID, true)
 }
 
-func opNextProgram(vm *virtualMachine) error {
+func opBlockHeigh(vm *virtualMachine) error {
        err := vm.applyCost(1)
        if err != nil {
                return err
        }
 
-       if vm.context.NextConsensusProgram == nil {
+       if vm.context.BlockHeigh == nil {
                return ErrContext
        }
-       return vm.push(*vm.context.NextConsensusProgram, true)
-}
-
-func opBlockTime(vm *virtualMachine) error {
-       err := vm.applyCost(1)
-       if err != nil {
-               return err
-       }
-
-       if vm.context.BlockTimeMS == nil {
-               return ErrContext
-       }
-       return vm.pushInt64(int64(*vm.context.BlockTimeMS), true)
+       return vm.pushInt64(int64(*vm.context.BlockHeigh), true)
 }
index 5e16df3..3214a22 100644 (file)
@@ -9,91 +9,6 @@ import (
        "github.com/bytom/testutil"
 )
 
-func TestNextProgram(t *testing.T) {
-       context := &Context{
-               NextConsensusProgram: &[]byte{1, 2, 3},
-       }
-
-       prog, err := Assemble("NEXTPROGRAM 0x010203 EQUAL")
-       if err != nil {
-               t.Fatal(err)
-       }
-       vm := &virtualMachine{
-               runLimit: 50000,
-               program:  prog,
-               context:  context,
-       }
-       err = vm.run()
-       if err != nil {
-               t.Errorf("got error %s, expected none", err)
-       }
-
-       prog, err = Assemble("NEXTPROGRAM 0x0102 EQUAL")
-       if err != nil {
-               t.Fatal(err)
-       }
-       vm = &virtualMachine{
-               runLimit: 50000,
-               program:  prog,
-               context:  context,
-       }
-       err = vm.run()
-       if err == nil && vm.falseResult() {
-               err = ErrFalseVMResult
-       }
-       switch err {
-       case nil:
-               t.Error("got ok result, expected failure")
-       case ErrFalseVMResult:
-               // ok
-       default:
-               t.Errorf("got error %s, expected ErrFalseVMResult", err)
-       }
-}
-
-func TestBlockTime(t *testing.T) {
-       var blockTimeMS uint64 = 3263826
-
-       prog, err := Assemble("BLOCKTIME 3263826 NUMEQUAL")
-       if err != nil {
-               t.Fatal(err)
-       }
-       vm := &virtualMachine{
-               runLimit: 50000,
-               program:  prog,
-               context:  &Context{BlockTimeMS: &blockTimeMS},
-       }
-       err = vm.run()
-       if err != nil {
-               t.Errorf("got error %s, expected none", err)
-       }
-       if vm.falseResult() {
-               t.Error("result is false, want success")
-       }
-
-       prog, err = Assemble("BLOCKTIME 3263827 NUMEQUAL")
-       if err != nil {
-               t.Fatal(err)
-       }
-       vm = &virtualMachine{
-               runLimit: 50000,
-               program:  prog,
-               context:  &Context{BlockTimeMS: &blockTimeMS},
-       }
-       err = vm.run()
-       if err == nil && vm.falseResult() {
-               err = ErrFalseVMResult
-       }
-       switch err {
-       case nil:
-               t.Error("got ok result, expected failure")
-       case ErrFalseVMResult:
-               // ok
-       default:
-               t.Errorf("got error %s, expected ErrFalseVMResult", err)
-       }
-}
-
 func TestOutputIDAndNonceOp(t *testing.T) {
        // arbitrary
        outputID := mustDecodeHex("0a60f9b12950c84c221012a808ef7782823b7e16b71fe2ba01811cda96a217df")
@@ -156,6 +71,49 @@ func TestOutputIDAndNonceOp(t *testing.T) {
        }
 }
 
+func TestBlockHeigh(t *testing.T) {
+       var blockHeigh uint64 = 6666
+
+       prog, err := Assemble("BLOCKHEIGH 6666 NUMEQUAL")
+       if err != nil {
+               t.Fatal(err)
+       }
+       vm := &virtualMachine{
+               runLimit: 50000,
+               program:  prog,
+               context:  &Context{BlockHeigh: &blockHeigh},
+       }
+       err = vm.run()
+       if err != nil {
+               t.Errorf("got error %s, expected none", err)
+       }
+       if vm.falseResult() {
+               t.Error("result is false, want success")
+       }
+
+       prog, err = Assemble("BLOCKHEIGH 7777 NUMEQUAL")
+       if err != nil {
+               t.Fatal(err)
+       }
+       vm = &virtualMachine{
+               runLimit: 50000,
+               program:  prog,
+               context:  &Context{BlockHeigh: &blockHeigh},
+       }
+       err = vm.run()
+       if err == nil && vm.falseResult() {
+               err = ErrFalseVMResult
+       }
+       switch err {
+       case nil:
+               t.Error("got ok result, expected failure")
+       case ErrFalseVMResult:
+               // ok
+       default:
+               t.Errorf("got error %s, expected ErrFalseVMResult", err)
+       }
+}
+
 func TestIntrospectionOps(t *testing.T) {
        // arbitrary
        entryID := mustDecodeHex("2e68d78cdeaa98944c12512cf9c719eb4881e9afb61e4b766df5f369aee6392c")
index 551dd79..d39d08e 100644 (file)
@@ -199,7 +199,6 @@ const (
        OP_CHECKSIG      Op = 0xac
        OP_CHECKMULTISIG Op = 0xad
        OP_TXSIGHASH     Op = 0xae
-       OP_BLOCKHASH     Op = 0xaf
 
        OP_CHECKOUTPUT Op = 0xc1
        OP_ASSET       Op = 0xc2
@@ -213,8 +212,7 @@ const (
        OP_ENTRYID     Op = 0xca
        OP_OUTPUTID    Op = 0xcb
        OP_NONCE       Op = 0xcc
-       OP_NEXTPROGRAM Op = 0xcd
-       OP_BLOCKTIME   Op = 0xce
+       OP_BLOCKHEIGH  Op = 0xcd
 )
 
 type opInfo struct {
@@ -311,7 +309,6 @@ var (
                OP_CHECKSIG:      {OP_CHECKSIG, "CHECKSIG", opCheckSig},
                OP_CHECKMULTISIG: {OP_CHECKMULTISIG, "CHECKMULTISIG", opCheckMultiSig},
                OP_TXSIGHASH:     {OP_TXSIGHASH, "TXSIGHASH", opTxSigHash},
-               OP_BLOCKHASH:     {OP_BLOCKHASH, "BLOCKHASH", opBlockHash},
 
                OP_CHECKOUTPUT: {OP_CHECKOUTPUT, "CHECKOUTPUT", opCheckOutput},
                OP_ASSET:       {OP_ASSET, "ASSET", opAsset},
@@ -325,8 +322,7 @@ var (
                OP_ENTRYID:     {OP_ENTRYID, "ENTRYID", opEntryID},
                OP_OUTPUTID:    {OP_OUTPUTID, "OUTPUTID", opOutputID},
                OP_NONCE:       {OP_NONCE, "NONCE", opNonce},
-               OP_NEXTPROGRAM: {OP_NEXTPROGRAM, "NEXTPROGRAM", opNextProgram},
-               OP_BLOCKTIME:   {OP_BLOCKTIME, "BLOCKTIME", opBlockTime},
+               OP_BLOCKHEIGH:  {OP_BLOCKHEIGH, "BLOCKHEIGH", opBlockHeigh},
        }
 
        opsByName map[string]opInfo
index 0961ff5..910672f 100644 (file)
@@ -9,8 +9,6 @@ import (
        "github.com/bytom/errors"
 )
 
-const initialRunLimit = 10000
-
 type virtualMachine struct {
        context *Context
 
@@ -33,14 +31,11 @@ type virtualMachine struct {
        altStack  [][]byte
 }
 
-// ErrFalseVMResult is one of the ways for a transaction to fail validation
-var ErrFalseVMResult = errors.New("false VM result")
-
 // TraceOut - if non-nil - will receive trace output during
 // execution.
 var TraceOut io.Writer
 
-func Verify(context *Context) (err error) {
+func Verify(context *Context, gasLimit int64) (gasLeft int64, err error) {
        defer func() {
                if r := recover(); r != nil {
                        if rErr, ok := r.(error); ok {
@@ -52,13 +47,13 @@ func Verify(context *Context) (err error) {
        }()
 
        if context.VMVersion != 1 {
-               return ErrUnsupportedVM
+               return gasLimit, ErrUnsupportedVM
        }
 
        vm := &virtualMachine{
                expansionReserved: context.TxVersion != nil && *context.TxVersion == 1,
                program:           context.Code,
-               runLimit:          initialRunLimit,
+               runLimit:          gasLimit,
                context:           context,
        }
 
@@ -66,7 +61,7 @@ func Verify(context *Context) (err error) {
        for i, arg := range args {
                err = vm.push(arg, false)
                if err != nil {
-                       return errors.Wrapf(err, "pushing initial argument %d", i)
+                       return vm.runLimit, errors.Wrapf(err, "pushing initial argument %d", i)
                }
        }
 
@@ -75,7 +70,7 @@ func Verify(context *Context) (err error) {
                err = ErrFalseVMResult
        }
 
-       return wrapErr(err, vm, args)
+       return vm.runLimit, wrapErr(err, vm, args)
 }
 
 // falseResult returns true iff the stack is empty or the top
@@ -198,6 +193,7 @@ func (vm *virtualMachine) top() ([]byte, error) {
 // positive cost decreases runlimit, negative cost increases it
 func (vm *virtualMachine) applyCost(n int64) error {
        if n > vm.runLimit {
+               vm.runLimit = 0
                return ErrRunLimitExceeded
        }
        vm.runLimit -= n
index a9b7432..f9ac2c1 100644 (file)
@@ -153,7 +153,7 @@ func doOKNotOK(t *testing.T, expectOK bool) {
                TraceOut = trace
                vm := &virtualMachine{
                        program:   prog,
-                       runLimit:  int64(initialRunLimit),
+                       runLimit:  int64(10000),
                        dataStack: append([][]byte{}, c.args...),
                }
                err = vm.run()
@@ -174,6 +174,7 @@ func TestVerifyTxInput(t *testing.T) {
        cases := []struct {
                vctx    *Context
                wantErr error
+               gasLeft int64
        }{
                {
                        vctx: &Context{
@@ -181,10 +182,12 @@ func TestVerifyTxInput(t *testing.T) {
                                Code:      []byte{byte(OP_ADD), byte(OP_5), byte(OP_NUMEQUAL)},
                                Arguments: [][]byte{{2}, {3}},
                        },
+                       gasLeft: 9986,
                },
                {
                        vctx:    &Context{VMVersion: 2},
                        wantErr: ErrUnsupportedVM,
+                       gasLeft: 10000,
                },
                {
                        vctx: &Context{
@@ -193,36 +196,18 @@ func TestVerifyTxInput(t *testing.T) {
                                Arguments: [][]byte{make([]byte, 50001)},
                        },
                        wantErr: ErrRunLimitExceeded,
+                       gasLeft: 0,
                },
        }
 
        for _, c := range cases {
-               gotErr := Verify(c.vctx)
+               gasLeft, gotErr := Verify(c.vctx, 10000)
                if errors.Root(gotErr) != c.wantErr {
                        t.Errorf("VerifyTxInput(%+v) err = %v want %v", c.vctx, gotErr, c.wantErr)
                }
-       }
-}
-
-func TestVerifyBlockHeader(t *testing.T) {
-       consensusProg := []byte{byte(OP_ADD), byte(OP_5), byte(OP_NUMEQUAL)}
-       context := &Context{
-               VMVersion: 1,
-               Code:      consensusProg,
-               Arguments: [][]byte{{2}, {3}},
-       }
-       gotErr := Verify(context)
-       if gotErr != nil {
-               t.Errorf("unexpected error: %v", gotErr)
-       }
-
-       context = &Context{
-               VMVersion: 1,
-               Arguments: [][]byte{make([]byte, 50000)},
-       }
-       gotErr = Verify(context)
-       if errors.Root(gotErr) != ErrRunLimitExceeded {
-               t.Error("expected block to exceed run limit")
+               if gasLeft != c.gasLeft {
+                       t.Errorf("VerifyTxInput(%+v) err = gasLeft doesn't match", c.vctx)
+               }
        }
 }
 
@@ -427,7 +412,7 @@ func TestVerifyTxInputQuickCheck(t *testing.T) {
                        // to a normal unit test.
                        MaxTimeMS: new(uint64),
                }
-               Verify(vctx)
+               Verify(vctx, 10000)
 
                return true
        }
@@ -449,14 +434,11 @@ func TestVerifyBlockHeaderQuickCheck(t *testing.T) {
                        }
                }()
                context := &Context{
-                       VMVersion:            1,
-                       Code:                 program,
-                       Arguments:            witnesses,
-                       BlockHash:            new([]byte),
-                       BlockTimeMS:          new(uint64),
-                       NextConsensusProgram: &[]byte{},
+                       VMVersion: 1,
+                       Code:      program,
+                       Arguments: witnesses,
                }
-               Verify(context)
+               Verify(context, 10000)
                return true
        }
        if err := quick.Check(f, nil); err != nil {
index 084d94b..ca43549 100644 (file)
@@ -15,25 +15,6 @@ func IsUnspendable(prog []byte) bool {
        return len(prog) > 0 && prog[0] == byte(vm.OP_FAIL)
 }
 
-// BlockMultiSigProgram returns a valid multisignature consensus
-// program where nrequired of the keys in pubkeys are required to have
-// signed the block for success.  An ErrBadValue will be returned if
-// nrequired is larger than the number of keys provided.  The result
-// is: BLOCKHASH <pubkey>... <nrequired> <npubkeys> CHECKMULTISIG
-func BlockMultiSigProgram(pubkeys []ed25519.PublicKey, nrequired int) ([]byte, error) {
-       err := checkMultiSigParams(int64(nrequired), int64(len(pubkeys)))
-       if err != nil {
-               return nil, err
-       }
-       builder := NewBuilder()
-       builder.AddOp(vm.OP_BLOCKHASH)
-       for _, key := range pubkeys {
-               builder.AddData(key)
-       }
-       builder.AddInt64(int64(nrequired)).AddInt64(int64(len(pubkeys))).AddOp(vm.OP_CHECKMULTISIG)
-       return builder.Build()
-}
-
 func ParseBlockMultiSigProgram(script []byte) ([]ed25519.PublicKey, int, error) {
        pops, err := vm.ParseProgram(script)
        if err != nil {
index a5f7590..ae6b47a 100644 (file)
@@ -39,38 +39,6 @@ func TestIsUnspendable(t *testing.T) {
        }
 }
 
-func Test00Multisig(t *testing.T) {
-       prog, err := BlockMultiSigProgram(nil, 0)
-       if err != nil {
-               t.Fatal(err)
-       }
-       if len(prog) < 1 {
-               t.Fatal("BlockMultiSigScript(0, 0) = {} want script")
-       }
-}
-
-func Test01Multisig(t *testing.T) {
-       pubkeys := []ed25519.PublicKey{{}}
-       _, err := BlockMultiSigProgram(pubkeys, 0)
-       if err == nil {
-               t.Fatal("BlockMultiSigScript(1, 0) = success want error")
-       }
-}
-
-func TestParse00Multisig(t *testing.T) {
-       prog, err := BlockMultiSigProgram(nil, 0)
-       if err != nil {
-               t.Fatal(err)
-       }
-       keys, quorum, err := ParseBlockMultiSigProgram(prog)
-       if err != nil {
-               t.Fatal(err)
-       }
-       if len(keys) != 0 || quorum != 0 {
-               t.Fatalf("ParseBlockMultiSigScript(%x) = (%v, %d) want (nil, 0)", prog, keys, quorum)
-       }
-}
-
 func TestP2SP(t *testing.T) {
        pub1, _, _ := ed25519.GenerateKey(nil)
        pub2, _, _ := ed25519.GenerateKey(nil)
@@ -89,22 +57,3 @@ func TestP2SP(t *testing.T) {
                t.Errorf("expected second pubkey to be %x, got %x", pub2, pubs[1])
        }
 }
-
-func TestBlockMultisig(t *testing.T) {
-       pub1, _, _ := ed25519.GenerateKey(nil)
-       pub2, _, _ := ed25519.GenerateKey(nil)
-       prog, _ := BlockMultiSigProgram([]ed25519.PublicKey{pub1, pub2}, 1)
-       pubs, n, err := ParseBlockMultiSigProgram(prog)
-       if err != nil {
-               t.Fatal(err)
-       }
-       if n != 1 {
-               t.Errorf("expected nrequired=1, got %d", n)
-       }
-       if !bytes.Equal(pubs[0], pub1) {
-               t.Errorf("expected first pubkey to be %x, got %x", pub1, pubs[0])
-       }
-       if !bytes.Equal(pubs[1], pub2) {
-               t.Errorf("expected second pubkey to be %x, got %x", pub2, pubs[1])
-       }
-}
index e10de31..f527745 100644 (file)
@@ -16,7 +16,7 @@ type BlockNonce [8]byte
 
 type ResultBlockchainInfo struct {
        LastHeight int                `json:"last_height"`
-       BlockMetas []*types.BlockMeta `json:"block_metas"`
+       //BlockMetas []*types.BlockMeta `json:"block_metas"`
 }
 
 type ResultGenesis struct {
@@ -24,15 +24,17 @@ type ResultGenesis struct {
 }
 
 type ResultBlock struct {
-       BlockMeta *types.BlockMeta `json:"block_meta"`
-       Block     *types.Block     `json:"block"`
+       //BlockMeta *types.BlockMeta `json:"block_meta"`
+       //Block     *types.Block     `json:"block"`
 }
 
+/*
 type ResultCommit struct {
        Header          *types.Header `json:"header"`
        Commit          *types.Commit `json:"commit"`
        CanonicalCommit bool          `json:"canonical"`
 }
+*/
 
 type ResultStatus struct {
        NodeInfo          *p2p.NodeInfo `json:"node_info"`
@@ -82,10 +84,12 @@ type Peer struct {
        ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
 }
 
+/*
 type ResultValidators struct {
        BlockHeight int                `json:"block_height"`
        Validators  []*types.Validator `json:"validators"`
 }
+*/
 
 type ResultDumpConsensusState struct {
        RoundState      string   `json:"round_state"`
@@ -107,6 +111,7 @@ type ResultBroadcastTxCommit struct {
        Height    int         `json:"height"`
 }
 
+/*
 type ResultTx struct {
        Height   int           `json:"height"`
        Index    int           `json:"index"`
@@ -114,11 +119,14 @@ type ResultTx struct {
        Tx       types.Tx      `json:"tx"`
        Proof    types.TxProof `json:"proof,omitempty"`
 }
+*/
 
+/*
 type ResultUnconfirmedTxs struct {
        N   int        `json:"n_txs"`
        Txs []types.Tx `json:"txs"`
 }
+*/
 
 type ResultABCIInfo struct {
        Response abci.ResponseInfo `json:"response"`
@@ -136,7 +144,9 @@ type ResultSubscribe struct{}
 
 type ResultUnsubscribe struct{}
 
+/*
 type ResultEvent struct {
        Name string            `json:"name"`
        Data types.TMEventData `json:"data"`
 }
+*/
diff --git a/testutil/deepequal.go b/testutil/deepequal.go
new file mode 100644 (file)
index 0000000..538500b
--- /dev/null
@@ -0,0 +1,155 @@
+package testutil
+
+import (
+       "reflect"
+       "unsafe"
+)
+
+type visit struct {
+       a1, a2 unsafe.Pointer
+       typ    reflect.Type
+}
+
+// DeepEqual is similar to reflect.DeepEqual, but treats nil as equal
+// to empty maps and slices. Some of the implementation is cribbed
+// from Go's reflect package.
+func DeepEqual(x, y interface{}) bool {
+       vx := reflect.ValueOf(x)
+       vy := reflect.ValueOf(y)
+       return deepValueEqual(vx, vy, make(map[visit]bool))
+}
+
+func deepValueEqual(x, y reflect.Value, visited map[visit]bool) bool {
+       if isEmpty(x) && isEmpty(y) {
+               return true
+       }
+       if !x.IsValid() {
+               return !y.IsValid()
+       }
+       if !y.IsValid() {
+               return false
+       }
+
+       tx := x.Type()
+       ty := y.Type()
+       if tx != ty {
+               return false
+       }
+
+       switch tx.Kind() {
+       case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+               if x.CanAddr() && y.CanAddr() {
+                       a1 := unsafe.Pointer(x.UnsafeAddr())
+                       a2 := unsafe.Pointer(y.UnsafeAddr())
+                       if uintptr(a1) > uintptr(a2) {
+                               // Canonicalize order to reduce number of entries in visited.
+                               // Assumes non-moving garbage collector.
+                               a1, a2 = a2, a1
+                       }
+                       v := visit{a1, a2, tx}
+                       if visited[v] {
+                               return true
+                       }
+                       visited[v] = true
+               }
+       }
+
+       switch tx.Kind() {
+       case reflect.Bool:
+               return x.Bool() == y.Bool()
+
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return x.Int() == y.Int()
+
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               return x.Uint() == y.Uint()
+
+       case reflect.Float32, reflect.Float64:
+               return x.Float() == y.Float()
+
+       case reflect.Complex64, reflect.Complex128:
+               return x.Complex() == y.Complex()
+
+       case reflect.String:
+               return x.String() == y.String()
+
+       case reflect.Array:
+               for i := 0; i < tx.Len(); i++ {
+                       if !deepValueEqual(x.Index(i), y.Index(i), visited) {
+                               return false
+                       }
+               }
+               return true
+
+       case reflect.Slice:
+               ttx := tx.Elem()
+               tty := ty.Elem()
+               if ttx != tty {
+                       return false
+               }
+               if x.Len() != y.Len() {
+                       return false
+               }
+               for i := 0; i < x.Len(); i++ {
+                       if !deepValueEqual(x.Index(i), y.Index(i), visited) {
+                               return false
+                       }
+               }
+               return true
+
+       case reflect.Interface:
+               if x.IsNil() {
+                       return y.IsNil()
+               }
+               if y.IsNil() {
+                       return false
+               }
+               return deepValueEqual(x.Elem(), y.Elem(), visited)
+
+       case reflect.Ptr:
+               if x.Pointer() == y.Pointer() {
+                       return true
+               }
+               return deepValueEqual(x.Elem(), y.Elem(), visited)
+
+       case reflect.Struct:
+               for i := 0; i < tx.NumField(); i++ {
+                       if !deepValueEqual(x.Field(i), y.Field(i), visited) {
+                               return false
+                       }
+               }
+               return true
+
+       case reflect.Map:
+               if x.Pointer() == y.Pointer() {
+                       return true
+               }
+               if x.Len() != y.Len() {
+                       return false
+               }
+               for _, k := range x.MapKeys() {
+                       if !deepValueEqual(x.MapIndex(k), y.MapIndex(k), visited) {
+                               return false
+                       }
+               }
+               return true
+
+       case reflect.Func:
+               return x.IsNil() && y.IsNil()
+       }
+       return false
+}
+
+func isEmpty(v reflect.Value) bool {
+       if !v.IsValid() {
+               return true
+       }
+       switch v.Type().Kind() {
+       case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr:
+               return v.IsNil()
+
+       case reflect.Slice, reflect.Map:
+               return v.IsNil() || v.Len() == 0
+       }
+       return false
+}
diff --git a/testutil/deepequal_test.go b/testutil/deepequal_test.go
new file mode 100644 (file)
index 0000000..0544e0e
--- /dev/null
@@ -0,0 +1,40 @@
+package testutil
+
+import "testing"
+
+func TestDeepEqual(t *testing.T) {
+       type s struct {
+               a int
+               b string
+       }
+
+       cases := []struct {
+               a, b interface{}
+               want bool
+       }{
+               {1, 1, true},
+               {1, 2, false},
+               {nil, nil, true},
+               {nil, []byte{}, true},
+               {nil, []byte{1}, false},
+               {[]byte{1}, []byte{1}, true},
+               {[]byte{1}, []byte{2}, false},
+               {[]byte{1}, []byte{1, 2}, false},
+               {[]byte{1}, []string{"1"}, false},
+               {[3]byte{}, [4]byte{}, false},
+               {[3]byte{1}, [3]byte{1, 0, 0}, true},
+               {s{}, s{}, true},
+               {s{a: 1}, s{}, false},
+               {s{b: "foo"}, s{}, false},
+               {"foo", "foo", true},
+               {"foo", "bar", false},
+               {"foo", nil, false},
+       }
+
+       for i, c := range cases {
+               got := DeepEqual(c.a, c.b)
+               if got != c.want {
+                       t.Errorf("case %d: got %v want %v", i, got, c.want)
+               }
+       }
+}
diff --git a/testutil/expect.go b/testutil/expect.go
new file mode 100644 (file)
index 0000000..791622f
--- /dev/null
@@ -0,0 +1,27 @@
+package testutil
+
+import (
+       "fmt"
+       "os"
+       "path/filepath"
+       "strings"
+       "testing"
+
+       "chain/errors"
+)
+
+var wd, _ = os.Getwd()
+
+func FatalErr(t testing.TB, err error) {
+       args := []interface{}{err}
+       for _, frame := range errors.Stack(err) {
+               file := frame.File
+               if rel, err := filepath.Rel(wd, file); err == nil && !strings.HasPrefix(rel, "../") {
+                       file = rel
+               }
+               funcname := frame.Func[strings.IndexByte(frame.Func, '.')+1:]
+               s := fmt.Sprintf("\n%s:%d: %s", file, frame.Line, funcname)
+               args = append(args, s)
+       }
+       t.Fatal(args...)
+}
diff --git a/testutil/keys.go b/testutil/keys.go
new file mode 100644 (file)
index 0000000..2fc389f
--- /dev/null
@@ -0,0 +1,32 @@
+package testutil
+
+import (
+       "chain/crypto/ed25519"
+       "chain/crypto/ed25519/chainkd"
+)
+
+var (
+       TestXPub chainkd.XPub
+       TestXPrv chainkd.XPrv
+       TestPub  ed25519.PublicKey
+       TestPubs []ed25519.PublicKey
+)
+
+type zeroReader struct{}
+
+func (z zeroReader) Read(buf []byte) (int, error) {
+       for i := range buf {
+               buf[i] = 0
+       }
+       return len(buf), nil
+}
+
+func init() {
+       var err error
+       TestXPrv, TestXPub, err = chainkd.NewXKeys(zeroReader{})
+       if err != nil {
+               panic(err)
+       }
+       TestPub = TestXPub.PublicKey()
+       TestPubs = []ed25519.PublicKey{TestPub}
+}
diff --git a/types/block.go b/types/block.go
deleted file mode 100644 (file)
index b306d57..0000000
+++ /dev/null
@@ -1,423 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "errors"
-       "fmt"
-       "io"
-       "strings"
-       "time"
-
-       wire "github.com/tendermint/go-wire"
-       "github.com/tendermint/go-wire/data"
-       . "github.com/tendermint/tmlibs/common"
-       "github.com/tendermint/tmlibs/merkle"
-)
-
-const (
-       MaxBlockSize         = 22020096 // 21MB TODO make it configurable
-       DefaultBlockPartSize = 65536    // 64kB TODO: put part size in parts header?
-)
-
-type Block struct {
-       *Header    `json:"header"`
-       *Data      `json:"data"`
-       LastCommit *Commit `json:"last_commit"`
-}
-
-// TODO: version
-func MakeBlock(height int, chainID string, txs []Tx, commit *Commit,
-       prevBlockID BlockID, valHash, appHash []byte, partSize int) (*Block, *PartSet) {
-       block := &Block{
-               Header: &Header{
-                       ChainID:        chainID,
-                       Height:         height,
-                       Time:           time.Now(),
-                       NumTxs:         len(txs),
-                       LastBlockID:    prevBlockID,
-                       ValidatorsHash: valHash,
-                       AppHash:        appHash, // state merkle root of txs from the previous block.
-               },
-               LastCommit: commit,
-               Data: &Data{
-                       Txs: txs,
-               },
-       }
-       block.FillHeader()
-       return block, block.MakePartSet(partSize)
-}
-
-// Basic validation that doesn't involve state data.
-func (b *Block) ValidateBasic(chainID string, lastBlockHeight int, lastBlockID BlockID,
-       lastBlockTime time.Time, appHash []byte) error {
-       if b.ChainID != chainID {
-               return errors.New(Fmt("Wrong Block.Header.ChainID. Expected %v, got %v", chainID, b.ChainID))
-       }
-       if b.Height != lastBlockHeight+1 {
-               return errors.New(Fmt("Wrong Block.Header.Height. Expected %v, got %v", lastBlockHeight+1, b.Height))
-       }
-       /*      TODO: Determine bounds for Time
-               See blockchain/reactor "stopSyncingDurationMinutes"
-
-               if !b.Time.After(lastBlockTime) {
-                       return errors.New("Invalid Block.Header.Time")
-               }
-       */
-       if b.NumTxs != len(b.Data.Txs) {
-               return errors.New(Fmt("Wrong Block.Header.NumTxs. Expected %v, got %v", len(b.Data.Txs), b.NumTxs))
-       }
-       if !b.LastBlockID.Equals(lastBlockID) {
-               return errors.New(Fmt("Wrong Block.Header.LastBlockID.  Expected %v, got %v", lastBlockID, b.LastBlockID))
-       }
-       if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) {
-               return errors.New(Fmt("Wrong Block.Header.LastCommitHash.  Expected %v, got %v", b.LastCommitHash, b.LastCommit.Hash()))
-       }
-       if b.Header.Height != 1 {
-               if err := b.LastCommit.ValidateBasic(); err != nil {
-                       return err
-               }
-       }
-       if !bytes.Equal(b.DataHash, b.Data.Hash()) {
-               return errors.New(Fmt("Wrong Block.Header.DataHash.  Expected %v, got %v", b.DataHash, b.Data.Hash()))
-       }
-       if !bytes.Equal(b.AppHash, appHash) {
-               return errors.New(Fmt("Wrong Block.Header.AppHash.  Expected %X, got %v", appHash, b.AppHash))
-       }
-       // NOTE: the AppHash and ValidatorsHash are validated later.
-       return nil
-}
-
-func (b *Block) FillHeader() {
-       if b.LastCommitHash == nil {
-               b.LastCommitHash = b.LastCommit.Hash()
-       }
-       if b.DataHash == nil {
-               b.DataHash = b.Data.Hash()
-       }
-}
-
-// Computes and returns the block hash.
-// If the block is incomplete, block hash is nil for safety.
-func (b *Block) Hash() data.Bytes {
-       // fmt.Println(">>", b.Data)
-       if b == nil || b.Header == nil || b.Data == nil || b.LastCommit == nil {
-               return nil
-       }
-       b.FillHeader()
-       return b.Header.Hash()
-}
-
-func (b *Block) MakePartSet(partSize int) *PartSet {
-       return NewPartSetFromData(wire.BinaryBytes(b), partSize)
-}
-
-// Convenience.
-// A nil block never hashes to anything.
-// Nothing hashes to a nil hash.
-func (b *Block) HashesTo(hash []byte) bool {
-       if len(hash) == 0 {
-               return false
-       }
-       if b == nil {
-               return false
-       }
-       return bytes.Equal(b.Hash(), hash)
-}
-
-func (b *Block) String() string {
-       return b.StringIndented("")
-}
-
-func (b *Block) StringIndented(indent string) string {
-       if b == nil {
-               return "nil-Block"
-       }
-       return fmt.Sprintf(`Block{
-%s  %v
-%s  %v
-%s  %v
-%s}#%v`,
-               indent, b.Header.StringIndented(indent+"  "),
-               indent, b.Data.StringIndented(indent+"  "),
-               indent, b.LastCommit.StringIndented(indent+"  "),
-               indent, b.Hash())
-}
-
-func (b *Block) StringShort() string {
-       if b == nil {
-               return "nil-Block"
-       } else {
-               return fmt.Sprintf("Block#%v", b.Hash())
-       }
-}
-
-//-----------------------------------------------------------------------------
-
-type Header struct {
-       ChainID        string     `json:"chain_id"`
-       Height         int        `json:"height"`
-       Time           time.Time  `json:"time"`
-       NumTxs         int        `json:"num_txs"` // XXX: Can we get rid of this?
-       LastBlockID    BlockID    `json:"last_block_id"`
-       LastCommitHash data.Bytes `json:"last_commit_hash"` // commit from validators from the last block
-       DataHash       data.Bytes `json:"data_hash"`        // transactions
-       ValidatorsHash data.Bytes `json:"validators_hash"`  // validators for the current block
-       AppHash        data.Bytes `json:"app_hash"`         // state after txs from the previous block
-}
-
-// NOTE: hash is nil if required fields are missing.
-func (h *Header) Hash() data.Bytes {
-       if len(h.ValidatorsHash) == 0 {
-               return nil
-       }
-       return merkle.SimpleHashFromMap(map[string]interface{}{
-               "ChainID":     h.ChainID,
-               "Height":      h.Height,
-               "Time":        h.Time,
-               "NumTxs":      h.NumTxs,
-               "LastBlockID": h.LastBlockID,
-               "LastCommit":  h.LastCommitHash,
-               "Data":        h.DataHash,
-               "Validators":  h.ValidatorsHash,
-               "App":         h.AppHash,
-       })
-}
-
-func (h *Header) StringIndented(indent string) string {
-       if h == nil {
-               return "nil-Header"
-       }
-       return fmt.Sprintf(`Header{
-%s  ChainID:        %v
-%s  Height:         %v
-%s  Time:           %v
-%s  NumTxs:         %v
-%s  LastBlockID:    %v
-%s  LastCommit:     %v
-%s  Data:           %v
-%s  Validators:     %v
-%s  App:            %v
-%s}#%v`,
-               indent, h.ChainID,
-               indent, h.Height,
-               indent, h.Time,
-               indent, h.NumTxs,
-               indent, h.LastBlockID,
-               indent, h.LastCommitHash,
-               indent, h.DataHash,
-               indent, h.ValidatorsHash,
-               indent, h.AppHash,
-               indent, h.Hash())
-}
-
-//-------------------------------------
-
-// NOTE: Commit is empty for height 1, but never nil.
-type Commit struct {
-       // NOTE: The Precommits are in order of address to preserve the bonded ValidatorSet order.
-       // Any peer with a block can gossip precommits by index with a peer without recalculating the
-       // active ValidatorSet.
-       BlockID    BlockID `json:"blockID"`
-       Precommits []*Vote `json:"precommits"`
-
-       // Volatile
-       firstPrecommit *Vote
-       hash           data.Bytes
-       bitArray       *BitArray
-}
-
-func (commit *Commit) FirstPrecommit() *Vote {
-       if len(commit.Precommits) == 0 {
-               return nil
-       }
-       if commit.firstPrecommit != nil {
-               return commit.firstPrecommit
-       }
-       for _, precommit := range commit.Precommits {
-               if precommit != nil {
-                       commit.firstPrecommit = precommit
-                       return precommit
-               }
-       }
-       return nil
-}
-
-func (commit *Commit) Height() int {
-       if len(commit.Precommits) == 0 {
-               return 0
-       }
-       return commit.FirstPrecommit().Height
-}
-
-func (commit *Commit) Round() int {
-       if len(commit.Precommits) == 0 {
-               return 0
-       }
-       return commit.FirstPrecommit().Round
-}
-
-func (commit *Commit) Type() byte {
-       return VoteTypePrecommit
-}
-
-func (commit *Commit) Size() int {
-       if commit == nil {
-               return 0
-       }
-       return len(commit.Precommits)
-}
-
-func (commit *Commit) BitArray() *BitArray {
-       if commit.bitArray == nil {
-               commit.bitArray = NewBitArray(len(commit.Precommits))
-               for i, precommit := range commit.Precommits {
-                       commit.bitArray.SetIndex(i, precommit != nil)
-               }
-       }
-       return commit.bitArray
-}
-
-func (commit *Commit) GetByIndex(index int) *Vote {
-       return commit.Precommits[index]
-}
-
-func (commit *Commit) IsCommit() bool {
-       if len(commit.Precommits) == 0 {
-               return false
-       }
-       return true
-}
-
-func (commit *Commit) ValidateBasic() error {
-       if commit.BlockID.IsZero() {
-               return errors.New("Commit cannot be for nil block")
-       }
-       if len(commit.Precommits) == 0 {
-               return errors.New("No precommits in commit")
-       }
-       height, round := commit.Height(), commit.Round()
-
-       // validate the precommits
-       for _, precommit := range commit.Precommits {
-               // It's OK for precommits to be missing.
-               if precommit == nil {
-                       continue
-               }
-               // Ensure that all votes are precommits
-               if precommit.Type != VoteTypePrecommit {
-                       return fmt.Errorf("Invalid commit vote. Expected precommit, got %v",
-                               precommit.Type)
-               }
-               // Ensure that all heights are the same
-               if precommit.Height != height {
-                       return fmt.Errorf("Invalid commit precommit height. Expected %v, got %v",
-                               height, precommit.Height)
-               }
-               // Ensure that all rounds are the same
-               if precommit.Round != round {
-                       return fmt.Errorf("Invalid commit precommit round. Expected %v, got %v",
-                               round, precommit.Round)
-               }
-       }
-       return nil
-}
-
-func (commit *Commit) Hash() data.Bytes {
-       if commit.hash == nil {
-               bs := make([]interface{}, len(commit.Precommits))
-               for i, precommit := range commit.Precommits {
-                       bs[i] = precommit
-               }
-               commit.hash = merkle.SimpleHashFromBinaries(bs)
-       }
-       return commit.hash
-}
-
-func (commit *Commit) StringIndented(indent string) string {
-       if commit == nil {
-               return "nil-Commit"
-       }
-       precommitStrings := make([]string, len(commit.Precommits))
-       for i, precommit := range commit.Precommits {
-               precommitStrings[i] = precommit.String()
-       }
-       return fmt.Sprintf(`Commit{
-%s  BlockID:    %v
-%s  Precommits: %v
-%s}#%v`,
-               indent, commit.BlockID,
-               indent, strings.Join(precommitStrings, "\n"+indent+"  "),
-               indent, commit.hash)
-}
-
-//-----------------------------------------------------------------------------
-
-type Data struct {
-
-       // Txs that will be applied by state @ block.Height+1.
-       // NOTE: not all txs here are valid.  We're just agreeing on the order first.
-       // This means that block.AppHash does not include these txs.
-       Txs Txs `json:"txs"`
-
-       // Volatile
-       hash data.Bytes
-}
-
-func (data *Data) Hash() data.Bytes {
-       if data.hash == nil {
-               data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs
-       }
-       return data.hash
-}
-
-func (data *Data) StringIndented(indent string) string {
-       if data == nil {
-               return "nil-Data"
-       }
-       txStrings := make([]string, MinInt(len(data.Txs), 21))
-       for i, tx := range data.Txs {
-               if i == 20 {
-                       txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs))
-                       break
-               }
-               txStrings[i] = fmt.Sprintf("Tx:%v", tx)
-       }
-       return fmt.Sprintf(`Data{
-%s  %v
-%s}#%v`,
-               indent, strings.Join(txStrings, "\n"+indent+"  "),
-               indent, data.hash)
-}
-
-//--------------------------------------------------------------------------------
-
-type BlockID struct {
-       Hash        data.Bytes    `json:"hash"`
-       PartsHeader PartSetHeader `json:"parts"`
-}
-
-func (blockID BlockID) IsZero() bool {
-       return len(blockID.Hash) == 0 && blockID.PartsHeader.IsZero()
-}
-
-func (blockID BlockID) Equals(other BlockID) bool {
-       return bytes.Equal(blockID.Hash, other.Hash) &&
-               blockID.PartsHeader.Equals(other.PartsHeader)
-}
-
-func (blockID BlockID) Key() string {
-       return string(blockID.Hash) + string(wire.BinaryBytes(blockID.PartsHeader))
-}
-
-func (blockID BlockID) WriteSignBytes(w io.Writer, n *int, err *error) {
-       if blockID.IsZero() {
-               wire.WriteTo([]byte("null"), w, n, err)
-       } else {
-               wire.WriteJSON(CanonicalBlockID(blockID), w, n, err)
-       }
-
-}
-
-func (blockID BlockID) String() string {
-       return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader)
-}
diff --git a/types/block_meta.go b/types/block_meta.go
deleted file mode 100644 (file)
index 8e5bd43..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-package types
-
-type BlockMeta struct {
-       BlockID BlockID `json:"block_id"` // the block hash and partsethash
-       Header  *Header `json:"header"`   // The block's Header
-}
-
-func NewBlockMeta(block *Block, blockParts *PartSet) *BlockMeta {
-       return &BlockMeta{
-               BlockID: BlockID{block.Hash(), blockParts.Header()},
-               Header:  block.Header,
-       }
-}
diff --git a/types/canonical_json.go b/types/canonical_json.go
deleted file mode 100644 (file)
index 2e8583a..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-package types
-
-import (
-       "github.com/tendermint/go-wire/data"
-)
-
-// canonical json is go-wire's json for structs with fields in alphabetical order
-
-type CanonicalJSONBlockID struct {
-       Hash        data.Bytes                 `json:"hash,omitempty"`
-       PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"`
-}
-
-type CanonicalJSONPartSetHeader struct {
-       Hash  data.Bytes `json:"hash"`
-       Total int        `json:"total"`
-}
-
-type CanonicalJSONProposal struct {
-       BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"`
-       Height           int                        `json:"height"`
-       POLBlockID       CanonicalJSONBlockID       `json:"pol_block_id"`
-       POLRound         int                        `json:"pol_round"`
-       Round            int                        `json:"round"`
-}
-
-type CanonicalJSONVote struct {
-       BlockID CanonicalJSONBlockID `json:"block_id"`
-       Height  int                  `json:"height"`
-       Round   int                  `json:"round"`
-       Type    byte                 `json:"type"`
-}
-
-//------------------------------------
-// Messages including a "chain id" can only be applied to one chain, hence "Once"
-
-type CanonicalJSONOnceProposal struct {
-       ChainID  string                `json:"chain_id"`
-       Proposal CanonicalJSONProposal `json:"proposal"`
-}
-
-type CanonicalJSONOnceVote struct {
-       ChainID string            `json:"chain_id"`
-       Vote    CanonicalJSONVote `json:"vote"`
-}
-
-//-----------------------------------
-// Canonicalize the structs
-
-func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID {
-       return CanonicalJSONBlockID{
-               Hash:        blockID.Hash,
-               PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader),
-       }
-}
-
-func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader {
-       return CanonicalJSONPartSetHeader{
-               psh.Hash,
-               psh.Total,
-       }
-}
-
-func CanonicalProposal(proposal *Proposal) CanonicalJSONProposal {
-       return CanonicalJSONProposal{
-               BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader),
-               Height:           proposal.Height,
-               POLBlockID:       CanonicalBlockID(proposal.POLBlockID),
-               POLRound:         proposal.POLRound,
-               Round:            proposal.Round,
-       }
-}
-
-func CanonicalVote(vote *Vote) CanonicalJSONVote {
-       return CanonicalJSONVote{
-               CanonicalBlockID(vote.BlockID),
-               vote.Height,
-               vote.Round,
-               vote.Type,
-       }
-}
index 8c29c44..9f79c34 100644 (file)
@@ -4,7 +4,7 @@ import (
        // for registering TMEventData as events.EventData
        abci "github.com/tendermint/abci/types"
        "github.com/tendermint/go-wire/data"
-       cmn "github.com/tendermint/tmlibs/common"
+       //cmn "github.com/tendermint/tmlibs/common"
        "github.com/tendermint/tmlibs/events"
 )
 
@@ -16,7 +16,7 @@ func EventStringUnbond() string  { return "Unbond" }
 func EventStringRebond() string  { return "Rebond" }
 func EventStringDupeout() string { return "Dupeout" }
 func EventStringFork() string    { return "Fork" }
-func EventStringTx(tx Tx) string { return cmn.Fmt("Tx:%X", tx.Hash()) }
+//func EventStringTx(tx Tx) string { return cmn.Fmt("Tx:%X", tx.Hash()) }
 
 func EventStringNewBlock() string         { return "NewBlock" }
 func EventStringNewBlockHeader() string   { return "NewBlockHeader" }
@@ -97,18 +97,18 @@ var tmEventDataMapper = data.NewMapper(TMEventData{}).
 // but some (an input to a call tx or a receive) are more exotic
 
 type EventDataNewBlock struct {
-       Block *Block `json:"block"`
+       //Block *Block `json:"block"`
 }
 
 // light weight event for benchmarking
 type EventDataNewBlockHeader struct {
-       Header *Header `json:"header"`
+       //Header *Header `json:"header"`
 }
 
 // All txs fire EventDataTx
 type EventDataTx struct {
        Height int           `json:"height"`
-       Tx     Tx            `json:"tx"`
+       //Tx     Tx            `json:"tx"`
        Data   data.Bytes    `json:"data"`
        Log    string        `json:"log"`
        Code   abci.CodeType `json:"code"`
@@ -126,7 +126,7 @@ type EventDataRoundState struct {
 }
 
 type EventDataVote struct {
-       Vote *Vote
+       //Vote *Vote
 }
 
 func (_ EventDataNewBlock) AssertIsTMEventData()       {}
@@ -191,9 +191,11 @@ func FireEventVote(fireable events.Fireable, vote EventDataVote) {
        fireEvent(fireable, EventStringVote(), TMEventData{vote})
 }
 
+/*
 func FireEventTx(fireable events.Fireable, tx EventDataTx) {
        fireEvent(fireable, EventStringTx(tx.Tx), TMEventData{tx})
 }
+*/
 
 //--- EventDataRoundState events
 
index 75999f6..283b61c 100644 (file)
@@ -4,7 +4,6 @@ import (
        "encoding/json"
        "time"
 
-       "github.com/tendermint/go-crypto"
        "github.com/tendermint/go-wire/data"
        cmn "github.com/tendermint/tmlibs/common"
 )
@@ -16,17 +15,10 @@ var GenDocKey = []byte("GenDocKey")
 
 //------------------------------------------------------------
 // core types for a genesis definition
-
-type GenesisValidator struct {
-       PubKey crypto.PubKey `json:"pub_key"`
-       Amount int64         `json:"amount"`
-       Name   string        `json:"name"`
-}
-
 type GenesisDoc struct {
        GenesisTime time.Time          `json:"genesis_time"`
        ChainID     string             `json:"chain_id"`
-       Validators  []GenesisValidator `json:"validators"`
+       PrivateKey  string                              `json:"private_key"`
        AppHash     data.Bytes         `json:"app_hash"`
 }
 
diff --git a/types/keys.go b/types/keys.go
deleted file mode 100644 (file)
index 90591b9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-package types
-
-var (
-       PeerStateKey     = "ConsensusReactor.peerState"
-       PeerMempoolChKey = "MempoolReactor.peerMempoolCh"
-)
diff --git a/types/part_set.go b/types/part_set.go
deleted file mode 100644 (file)
index e15d2ca..0000000
+++ /dev/null
@@ -1,276 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "errors"
-       "fmt"
-       "io"
-       "sync"
-
-       "golang.org/x/crypto/ripemd160"
-
-       "github.com/tendermint/go-wire"
-       "github.com/tendermint/go-wire/data"
-       cmn "github.com/tendermint/tmlibs/common"
-       "github.com/tendermint/tmlibs/merkle"
-)
-
-var (
-       ErrPartSetUnexpectedIndex = errors.New("Error part set unexpected index")
-       ErrPartSetInvalidProof    = errors.New("Error part set invalid proof")
-)
-
-type Part struct {
-       Index int                `json:"index"`
-       Bytes data.Bytes         `json:"bytes"`
-       Proof merkle.SimpleProof `json:"proof"`
-
-       // Cache
-       hash []byte
-}
-
-func (part *Part) Hash() []byte {
-       if part.hash != nil {
-               return part.hash
-       } else {
-               hasher := ripemd160.New()
-               hasher.Write(part.Bytes) // doesn't err
-               part.hash = hasher.Sum(nil)
-               return part.hash
-       }
-}
-
-func (part *Part) String() string {
-       return part.StringIndented("")
-}
-
-func (part *Part) StringIndented(indent string) string {
-       return fmt.Sprintf(`Part{#%v
-%s  Bytes: %X...
-%s  Proof: %v
-%s}`,
-               part.Index,
-               indent, cmn.Fingerprint(part.Bytes),
-               indent, part.Proof.StringIndented(indent+"  "),
-               indent)
-}
-
-//-------------------------------------
-
-type PartSetHeader struct {
-       Total int        `json:"total"`
-       Hash  data.Bytes `json:"hash"`
-}
-
-func (psh PartSetHeader) String() string {
-       return fmt.Sprintf("%v:%X", psh.Total, cmn.Fingerprint(psh.Hash))
-}
-
-func (psh PartSetHeader) IsZero() bool {
-       return psh.Total == 0
-}
-
-func (psh PartSetHeader) Equals(other PartSetHeader) bool {
-       return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash)
-}
-
-func (psh PartSetHeader) WriteSignBytes(w io.Writer, n *int, err *error) {
-       wire.WriteJSON(CanonicalPartSetHeader(psh), w, n, err)
-}
-
-//-------------------------------------
-
-type PartSet struct {
-       total int
-       hash  []byte
-
-       mtx           sync.Mutex
-       parts         []*Part
-       partsBitArray *cmn.BitArray
-       count         int
-}
-
-// Returns an immutable, full PartSet from the data bytes.
-// The data bytes are split into "partSize" chunks, and merkle tree computed.
-func NewPartSetFromData(data []byte, partSize int) *PartSet {
-       // divide data into 4kb parts.
-       total := (len(data) + partSize - 1) / partSize
-       parts := make([]*Part, total)
-       parts_ := make([]merkle.Hashable, total)
-       partsBitArray := cmn.NewBitArray(total)
-       for i := 0; i < total; i++ {
-               part := &Part{
-                       Index: i,
-                       Bytes: data[i*partSize : cmn.MinInt(len(data), (i+1)*partSize)],
-               }
-               parts[i] = part
-               parts_[i] = part
-               partsBitArray.SetIndex(i, true)
-       }
-       // Compute merkle proofs
-       root, proofs := merkle.SimpleProofsFromHashables(parts_)
-       for i := 0; i < total; i++ {
-               parts[i].Proof = *proofs[i]
-       }
-       return &PartSet{
-               total:         total,
-               hash:          root,
-               parts:         parts,
-               partsBitArray: partsBitArray,
-               count:         total,
-       }
-}
-
-// Returns an empty PartSet ready to be populated.
-func NewPartSetFromHeader(header PartSetHeader) *PartSet {
-       return &PartSet{
-               total:         header.Total,
-               hash:          header.Hash,
-               parts:         make([]*Part, header.Total),
-               partsBitArray: cmn.NewBitArray(header.Total),
-               count:         0,
-       }
-}
-
-func (ps *PartSet) Header() PartSetHeader {
-       if ps == nil {
-               return PartSetHeader{}
-       } else {
-               return PartSetHeader{
-                       Total: ps.total,
-                       Hash:  ps.hash,
-               }
-       }
-}
-
-func (ps *PartSet) HasHeader(header PartSetHeader) bool {
-       if ps == nil {
-               return false
-       } else {
-               return ps.Header().Equals(header)
-       }
-}
-
-func (ps *PartSet) BitArray() *cmn.BitArray {
-       ps.mtx.Lock()
-       defer ps.mtx.Unlock()
-       return ps.partsBitArray.Copy()
-}
-
-func (ps *PartSet) Hash() []byte {
-       if ps == nil {
-               return nil
-       }
-       return ps.hash
-}
-
-func (ps *PartSet) HashesTo(hash []byte) bool {
-       if ps == nil {
-               return false
-       }
-       return bytes.Equal(ps.hash, hash)
-}
-
-func (ps *PartSet) Count() int {
-       if ps == nil {
-               return 0
-       }
-       return ps.count
-}
-
-func (ps *PartSet) Total() int {
-       if ps == nil {
-               return 0
-       }
-       return ps.total
-}
-
-func (ps *PartSet) AddPart(part *Part, verify bool) (bool, error) {
-       ps.mtx.Lock()
-       defer ps.mtx.Unlock()
-
-       // Invalid part index
-       if part.Index >= ps.total {
-               return false, ErrPartSetUnexpectedIndex
-       }
-
-       // If part already exists, return false.
-       if ps.parts[part.Index] != nil {
-               return false, nil
-       }
-
-       // Check hash proof
-       if verify {
-               if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) {
-                       return false, ErrPartSetInvalidProof
-               }
-       }
-
-       // Add part
-       ps.parts[part.Index] = part
-       ps.partsBitArray.SetIndex(part.Index, true)
-       ps.count++
-       return true, nil
-}
-
-func (ps *PartSet) GetPart(index int) *Part {
-       ps.mtx.Lock()
-       defer ps.mtx.Unlock()
-       return ps.parts[index]
-}
-
-func (ps *PartSet) IsComplete() bool {
-       return ps.count == ps.total
-}
-
-func (ps *PartSet) GetReader() io.Reader {
-       if !ps.IsComplete() {
-               cmn.PanicSanity("Cannot GetReader() on incomplete PartSet")
-       }
-       return NewPartSetReader(ps.parts)
-}
-
-type PartSetReader struct {
-       i      int
-       parts  []*Part
-       reader *bytes.Reader
-}
-
-func NewPartSetReader(parts []*Part) *PartSetReader {
-       return &PartSetReader{
-               i:      0,
-               parts:  parts,
-               reader: bytes.NewReader(parts[0].Bytes),
-       }
-}
-
-func (psr *PartSetReader) Read(p []byte) (n int, err error) {
-       readerLen := psr.reader.Len()
-       if readerLen >= len(p) {
-               return psr.reader.Read(p)
-       } else if readerLen > 0 {
-               n1, err := psr.Read(p[:readerLen])
-               if err != nil {
-                       return n1, err
-               }
-               n2, err := psr.Read(p[readerLen:])
-               return n1 + n2, err
-       }
-
-       psr.i += 1
-       if psr.i >= len(psr.parts) {
-               return 0, io.EOF
-       }
-       psr.reader = bytes.NewReader(psr.parts[psr.i].Bytes)
-       return psr.Read(p)
-}
-
-func (ps *PartSet) StringShort() string {
-       if ps == nil {
-               return "nil-PartSet"
-       } else {
-               ps.mtx.Lock()
-               defer ps.mtx.Unlock()
-               return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total())
-       }
-}
diff --git a/types/part_set_test.go b/types/part_set_test.go
deleted file mode 100644 (file)
index 7088ef3..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "io/ioutil"
-       "testing"
-
-       . "github.com/tendermint/tmlibs/common"
-)
-
-const (
-       testPartSize = 65536 // 64KB ...  4096 // 4KB
-)
-
-func TestBasicPartSet(t *testing.T) {
-
-       // Construct random data of size partSize * 100
-       data := RandBytes(testPartSize * 100)
-
-       partSet := NewPartSetFromData(data, testPartSize)
-       if len(partSet.Hash()) == 0 {
-               t.Error("Expected to get hash")
-       }
-       if partSet.Total() != 100 {
-               t.Errorf("Expected to get 100 parts, but got %v", partSet.Total())
-       }
-       if !partSet.IsComplete() {
-               t.Errorf("PartSet should be complete")
-       }
-
-       // Test adding parts to a new partSet.
-       partSet2 := NewPartSetFromHeader(partSet.Header())
-
-       for i := 0; i < partSet.Total(); i++ {
-               part := partSet.GetPart(i)
-               //t.Logf("\n%v", part)
-               added, err := partSet2.AddPart(part, true)
-               if !added || err != nil {
-                       t.Errorf("Failed to add part %v, error: %v", i, err)
-               }
-       }
-
-       if !bytes.Equal(partSet.Hash(), partSet2.Hash()) {
-               t.Error("Expected to get same hash")
-       }
-       if partSet2.Total() != 100 {
-               t.Errorf("Expected to get 100 parts, but got %v", partSet2.Total())
-       }
-       if !partSet2.IsComplete() {
-               t.Errorf("Reconstructed PartSet should be complete")
-       }
-
-       // Reconstruct data, assert that they are equal.
-       data2Reader := partSet2.GetReader()
-       data2, err := ioutil.ReadAll(data2Reader)
-       if err != nil {
-               t.Errorf("Error reading data2Reader: %v", err)
-       }
-       if !bytes.Equal(data, data2) {
-               t.Errorf("Got wrong data.")
-       }
-
-}
-
-func TestWrongProof(t *testing.T) {
-
-       // Construct random data of size partSize * 100
-       data := RandBytes(testPartSize * 100)
-       partSet := NewPartSetFromData(data, testPartSize)
-
-       // Test adding a part with wrong data.
-       partSet2 := NewPartSetFromHeader(partSet.Header())
-
-       // Test adding a part with wrong trail.
-       part := partSet.GetPart(0)
-       part.Proof.Aunts[0][0] += byte(0x01)
-       added, err := partSet2.AddPart(part, true)
-       if added || err == nil {
-               t.Errorf("Expected to fail adding a part with bad trail.")
-       }
-
-       // Test adding a part with wrong bytes.
-       part = partSet.GetPart(1)
-       part.Bytes[0] += byte(0x01)
-       added, err = partSet2.AddPart(part, true)
-       if added || err == nil {
-               t.Errorf("Expected to fail adding a part with bad bytes.")
-       }
-
-}
diff --git a/types/priv_validator.go b/types/priv_validator.go
deleted file mode 100644 (file)
index 8c9a011..0000000
+++ /dev/null
@@ -1,275 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "encoding/json"
-       "errors"
-       "fmt"
-       "io/ioutil"
-       "os"
-       "sync"
-
-       crypto "github.com/tendermint/go-crypto"
-       data "github.com/tendermint/go-wire/data"
-       . "github.com/tendermint/tmlibs/common"
-       "github.com/tendermint/tmlibs/log"
-)
-
-const (
-       stepNone      = 0 // Used to distinguish the initial state
-       stepPropose   = 1
-       stepPrevote   = 2
-       stepPrecommit = 3
-)
-
-func voteToStep(vote *Vote) int8 {
-       switch vote.Type {
-       case VoteTypePrevote:
-               return stepPrevote
-       case VoteTypePrecommit:
-               return stepPrecommit
-       default:
-               PanicSanity("Unknown vote type")
-               return 0
-       }
-}
-
-type PrivValidator struct {
-       Address       data.Bytes       `json:"address"`
-       PubKey        crypto.PubKey    `json:"pub_key"`
-       LastHeight    int              `json:"last_height"`
-       LastRound     int              `json:"last_round"`
-       LastStep      int8             `json:"last_step"`
-       LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures
-       LastSignBytes data.Bytes       `json:"last_signbytes,omitempty"` // so we dont lose signatures
-
-       // PrivKey should be empty if a Signer other than the default is being used.
-       PrivKey crypto.PrivKey `json:"priv_key"`
-       Signer  `json:"-"`
-
-       // For persistence.
-       // Overloaded for testing.
-       filePath string
-       mtx      sync.Mutex
-}
-
-// This is used to sign votes.
-// It is the caller's duty to verify the msg before calling Sign,
-// eg. to avoid double signing.
-// Currently, the only callers are SignVote and SignProposal
-type Signer interface {
-       PubKey() crypto.PubKey
-       Sign(msg []byte) crypto.Signature
-}
-
-// Implements Signer
-type DefaultSigner struct {
-       priv crypto.PrivKey
-}
-
-func NewDefaultSigner(priv crypto.PrivKey) *DefaultSigner {
-       return &DefaultSigner{priv: priv}
-}
-
-// Implements Signer
-func (ds *DefaultSigner) Sign(msg []byte) crypto.Signature {
-       return ds.priv.Sign(msg)
-}
-
-// Implements Signer
-func (ds *DefaultSigner) PubKey() crypto.PubKey {
-       return ds.priv.PubKey()
-}
-
-func (privVal *PrivValidator) SetSigner(s Signer) {
-       privVal.Signer = s
-       privVal.setPubKeyAndAddress()
-}
-
-// Overwrite address and pubkey for convenience
-func (privVal *PrivValidator) setPubKeyAndAddress() {
-       privVal.PubKey = privVal.Signer.PubKey()
-       privVal.Address = privVal.PubKey.Address()
-}
-
-// Generates a new validator with private key.
-func GenPrivValidator() *PrivValidator {
-       privKey := crypto.GenPrivKeyEd25519().Wrap()
-       pubKey := privKey.PubKey()
-       return &PrivValidator{
-               Address:  pubKey.Address(),
-               PubKey:   pubKey,
-               PrivKey:  privKey,
-               LastStep: stepNone,
-               filePath: "",
-               Signer:   NewDefaultSigner(privKey),
-       }
-}
-
-func LoadPrivValidator(filePath string) *PrivValidator {
-       privValJSONBytes, err := ioutil.ReadFile(filePath)
-       if err != nil {
-               Exit(err.Error())
-       }
-       privVal := PrivValidator{}
-       err = json.Unmarshal(privValJSONBytes, &privVal)
-       if err != nil {
-               Exit(Fmt("Error reading PrivValidator from %v: %v\n", filePath, err))
-       }
-
-       privVal.filePath = filePath
-       privVal.Signer = NewDefaultSigner(privVal.PrivKey)
-       privVal.setPubKeyAndAddress()
-       return &privVal
-}
-
-func LoadOrGenPrivValidator(filePath string, logger log.Logger) *PrivValidator {
-       var privValidator *PrivValidator
-       if _, err := os.Stat(filePath); err == nil {
-               privValidator = LoadPrivValidator(filePath)
-               logger.Info("Loaded PrivValidator",
-                       "file", filePath, "privValidator", privValidator)
-       } else {
-               privValidator = GenPrivValidator()
-               privValidator.SetFile(filePath)
-               privValidator.Save()
-               logger.Info("Generated PrivValidator", "file", filePath)
-       }
-       return privValidator
-}
-
-func (privVal *PrivValidator) SetFile(filePath string) {
-       privVal.mtx.Lock()
-       defer privVal.mtx.Unlock()
-       privVal.filePath = filePath
-}
-
-func (privVal *PrivValidator) Save() {
-       privVal.mtx.Lock()
-       defer privVal.mtx.Unlock()
-       privVal.save()
-}
-
-func (privVal *PrivValidator) save() {
-       if privVal.filePath == "" {
-               PanicSanity("Cannot save PrivValidator: filePath not set")
-       }
-       jsonBytes, err := json.Marshal(privVal)
-       if err != nil {
-               // `@; BOOM!!!
-               PanicCrisis(err)
-       }
-       err = WriteFileAtomic(privVal.filePath, jsonBytes, 0600)
-       if err != nil {
-               // `@; BOOM!!!
-               PanicCrisis(err)
-       }
-}
-
-// NOTE: Unsafe!
-func (privVal *PrivValidator) Reset() {
-       privVal.LastHeight = 0
-       privVal.LastRound = 0
-       privVal.LastStep = 0
-       privVal.LastSignature = crypto.Signature{}
-       privVal.LastSignBytes = nil
-       privVal.Save()
-}
-
-func (privVal *PrivValidator) GetAddress() []byte {
-       return privVal.Address
-}
-
-func (privVal *PrivValidator) SignVote(chainID string, vote *Vote) error {
-       privVal.mtx.Lock()
-       defer privVal.mtx.Unlock()
-       signature, err := privVal.signBytesHRS(vote.Height, vote.Round, voteToStep(vote), SignBytes(chainID, vote))
-       if err != nil {
-               return errors.New(Fmt("Error signing vote: %v", err))
-       }
-       vote.Signature = signature
-       return nil
-}
-
-func (privVal *PrivValidator) SignProposal(chainID string, proposal *Proposal) error {
-       privVal.mtx.Lock()
-       defer privVal.mtx.Unlock()
-       signature, err := privVal.signBytesHRS(proposal.Height, proposal.Round, stepPropose, SignBytes(chainID, proposal))
-       if err != nil {
-               return fmt.Errorf("Error signing proposal: %v", err)
-       }
-       proposal.Signature = signature
-       return nil
-}
-
-// check if there's a regression. Else sign and write the hrs+signature to disk
-func (privVal *PrivValidator) signBytesHRS(height, round int, step int8, signBytes []byte) (crypto.Signature, error) {
-       sig := crypto.Signature{}
-       // If height regression, err
-       if privVal.LastHeight > height {
-               return sig, errors.New("Height regression")
-       }
-       // More cases for when the height matches
-       if privVal.LastHeight == height {
-               // If round regression, err
-               if privVal.LastRound > round {
-                       return sig, errors.New("Round regression")
-               }
-               // If step regression, err
-               if privVal.LastRound == round {
-                       if privVal.LastStep > step {
-                               return sig, errors.New("Step regression")
-                       } else if privVal.LastStep == step {
-                               if privVal.LastSignBytes != nil {
-                                       if privVal.LastSignature.Empty() {
-                                               PanicSanity("privVal: LastSignature is nil but LastSignBytes is not!")
-                                       }
-                                       // so we dont sign a conflicting vote or proposal
-                                       // NOTE: proposals are non-deterministic (include time),
-                                       // so we can actually lose them, but will still never sign conflicting ones
-                                       if bytes.Equal(privVal.LastSignBytes, signBytes) {
-                                               // log.Notice("Using privVal.LastSignature", "sig", privVal.LastSignature)
-                                               return privVal.LastSignature, nil
-                                       }
-                               }
-                               return sig, errors.New("Step regression")
-                       }
-               }
-       }
-
-       // Sign
-       sig = privVal.Sign(signBytes)
-
-       // Persist height/round/step
-       privVal.LastHeight = height
-       privVal.LastRound = round
-       privVal.LastStep = step
-       privVal.LastSignature = sig
-       privVal.LastSignBytes = signBytes
-       privVal.save()
-
-       return sig, nil
-
-}
-
-func (privVal *PrivValidator) String() string {
-       return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", privVal.Address, privVal.LastHeight, privVal.LastRound, privVal.LastStep)
-}
-
-//-------------------------------------
-
-type PrivValidatorsByAddress []*PrivValidator
-
-func (pvs PrivValidatorsByAddress) Len() int {
-       return len(pvs)
-}
-
-func (pvs PrivValidatorsByAddress) Less(i, j int) bool {
-       return bytes.Compare(pvs[i].Address, pvs[j].Address) == -1
-}
-
-func (pvs PrivValidatorsByAddress) Swap(i, j int) {
-       it := pvs[i]
-       pvs[i] = pvs[j]
-       pvs[j] = it
-}
diff --git a/types/priv_validator_test.go b/types/priv_validator_test.go
deleted file mode 100644 (file)
index 1eb0b57..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-package types
-
-import (
-       "encoding/hex"
-       "encoding/json"
-       "fmt"
-       "testing"
-
-       "github.com/stretchr/testify/assert"
-       "github.com/stretchr/testify/require"
-       crypto "github.com/tendermint/go-crypto"
-)
-
-func TestLoadValidator(t *testing.T) {
-       assert, require := assert.New(t), require.New(t)
-
-       // create some fixed values
-       addrStr := "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456"
-       pubStr := "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
-       privStr := "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
-       addrBytes, _ := hex.DecodeString(addrStr)
-       pubBytes, _ := hex.DecodeString(pubStr)
-       privBytes, _ := hex.DecodeString(privStr)
-
-       // prepend type byte
-       pubKey, err := crypto.PubKeyFromBytes(append([]byte{1}, pubBytes...))
-       require.Nil(err, "%+v", err)
-       privKey, err := crypto.PrivKeyFromBytes(append([]byte{1}, privBytes...))
-       require.Nil(err, "%+v", err)
-
-       serialized := fmt.Sprintf(`{
-  "address": "%s",
-  "pub_key": {
-    "type": "ed25519",
-    "data": "%s"
-  },
-  "priv_key": {
-    "type": "ed25519",
-    "data": "%s"
-  },
-  "last_height": 0,
-  "last_round": 0,
-  "last_step": 0,
-  "last_signature": null
-}`, addrStr, pubStr, privStr)
-
-       val := PrivValidator{}
-       err = json.Unmarshal([]byte(serialized), &val)
-       require.Nil(err, "%+v", err)
-
-       // make sure the values match
-       assert.EqualValues(addrBytes, val.Address)
-       assert.EqualValues(pubKey, val.PubKey)
-       assert.EqualValues(privKey, val.PrivKey)
-
-       // export it and make sure it is the same
-       out, err := json.Marshal(val)
-       require.Nil(err, "%+v", err)
-       assert.JSONEq(serialized, string(out))
-}
diff --git a/types/proposal.go b/types/proposal.go
deleted file mode 100644 (file)
index 8406403..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-package types
-
-import (
-       "errors"
-       "fmt"
-       "io"
-
-       //. "github.com/tendermint/tmlibs/common"
-       "github.com/tendermint/go-crypto"
-       "github.com/tendermint/go-wire"
-)
-
-var (
-       ErrInvalidBlockPartSignature = errors.New("Error invalid block part signature")
-       ErrInvalidBlockPartHash      = errors.New("Error invalid block part hash")
-)
-
-type Proposal struct {
-       Height           int              `json:"height"`
-       Round            int              `json:"round"`
-       BlockPartsHeader PartSetHeader    `json:"block_parts_header"`
-       POLRound         int              `json:"pol_round"`    // -1 if null.
-       POLBlockID       BlockID          `json:"pol_block_id"` // zero if null.
-       Signature        crypto.Signature `json:"signature"`
-}
-
-// polRound: -1 if no polRound.
-func NewProposal(height int, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal {
-       return &Proposal{
-               Height:           height,
-               Round:            round,
-               BlockPartsHeader: blockPartsHeader,
-               POLRound:         polRound,
-               POLBlockID:       polBlockID,
-       }
-}
-
-func (p *Proposal) String() string {
-       return fmt.Sprintf("Proposal{%v/%v %v (%v,%v) %v}", p.Height, p.Round,
-               p.BlockPartsHeader, p.POLRound, p.POLBlockID, p.Signature)
-}
-
-func (p *Proposal) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
-       wire.WriteJSON(CanonicalJSONOnceProposal{
-               ChainID:  chainID,
-               Proposal: CanonicalProposal(p),
-       }, w, n, err)
-}
diff --git a/types/proposal_test.go b/types/proposal_test.go
deleted file mode 100644 (file)
index 622236b..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-package types
-
-import (
-       "testing"
-)
-
-var testProposal = &Proposal{
-       Height:           12345,
-       Round:            23456,
-       BlockPartsHeader: PartSetHeader{111, []byte("blockparts")},
-       POLRound:         -1,
-}
-
-func TestProposalSignable(t *testing.T) {
-       signBytes := SignBytes("test_chain_id", testProposal)
-       signStr := string(signBytes)
-
-       expected := `{"chain_id":"test_chain_id","proposal":{"block_parts_header":{"hash":"626C6F636B7061727473","total":111},"height":12345,"pol_block_id":{},"pol_round":-1,"round":23456}}`
-       if signStr != expected {
-               t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr)
-       }
-}
-
-func BenchmarkProposalWriteSignBytes(b *testing.B) {
-       for i := 0; i < b.N; i++ {
-               SignBytes("test_chain_id", testProposal)
-       }
-}
-
-func BenchmarkProposalSign(b *testing.B) {
-       privVal := GenPrivValidator()
-       for i := 0; i < b.N; i++ {
-               privVal.Sign(SignBytes("test_chain_id", testProposal))
-       }
-}
-
-func BenchmarkProposalVerifySignature(b *testing.B) {
-       signBytes := SignBytes("test_chain_id", testProposal)
-       privVal := GenPrivValidator()
-       signature := privVal.Sign(signBytes)
-       pubKey := privVal.PubKey
-
-       for i := 0; i < b.N; i++ {
-               pubKey.VerifyBytes(SignBytes("test_chain_id", testProposal), signature)
-       }
-}
diff --git a/types/protobuf.go b/types/protobuf.go
deleted file mode 100644 (file)
index 59994fe..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-package types
-
-import (
-       "github.com/tendermint/abci/types"
-)
-
-// Convert tendermint types to protobuf types
-var TM2PB = tm2pb{}
-
-type tm2pb struct{}
-
-func (tm2pb) Header(header *Header) *types.Header {
-       return &types.Header{
-               ChainId:        header.ChainID,
-               Height:         uint64(header.Height),
-               Time:           uint64(header.Time.Unix()),
-               NumTxs:         uint64(header.NumTxs),
-               LastBlockId:    TM2PB.BlockID(header.LastBlockID),
-               LastCommitHash: header.LastCommitHash,
-               DataHash:       header.DataHash,
-               AppHash:        header.AppHash,
-       }
-}
-
-func (tm2pb) BlockID(blockID BlockID) *types.BlockID {
-       return &types.BlockID{
-               Hash:  blockID.Hash,
-               Parts: TM2PB.PartSetHeader(blockID.PartsHeader),
-       }
-}
-
-func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) *types.PartSetHeader {
-       return &types.PartSetHeader{
-               Total: uint64(partSetHeader.Total),
-               Hash:  partSetHeader.Hash,
-       }
-}
-
-func (tm2pb) Validator(val *Validator) *types.Validator {
-       return &types.Validator{
-               PubKey: val.PubKey.Bytes(),
-               Power:  uint64(val.VotingPower),
-       }
-}
-
-func (tm2pb) Validators(vals *ValidatorSet) []*types.Validator {
-       validators := make([]*types.Validator, len(vals.Validators))
-       for i, val := range vals.Validators {
-               validators[i] = TM2PB.Validator(val)
-       }
-       return validators
-}
diff --git a/types/services.go b/types/services.go
deleted file mode 100644 (file)
index ee20487..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-package types
-
-import (
-       abci "github.com/tendermint/abci/types"
-)
-
-//------------------------------------------------------
-// blockchain services types
-// NOTE: Interfaces used by RPC must be thread safe!
-//------------------------------------------------------
-
-//------------------------------------------------------
-// mempool
-
-// Updates to the mempool need to be synchronized with committing a block
-// so apps can reset their transient state on Commit
-type Mempool interface {
-       Lock()
-       Unlock()
-
-       Size() int
-       CheckTx(Tx, func(*abci.Response)) error
-       Reap(int) Txs
-       Update(height int, txs Txs)
-       Flush()
-}
-
-type MockMempool struct {
-}
-
-func (m MockMempool) Lock()                                        {}
-func (m MockMempool) Unlock()                                      {}
-func (m MockMempool) Size() int                                    { return 0 }
-func (m MockMempool) CheckTx(tx Tx, cb func(*abci.Response)) error { return nil }
-func (m MockMempool) Reap(n int) Txs                               { return Txs{} }
-func (m MockMempool) Update(height int, txs Txs)                   {}
-func (m MockMempool) Flush()                                       {}
-
-//------------------------------------------------------
-// blockstore
-
-type BlockStoreRPC interface {
-       Height() int
-
-       LoadBlockMeta(height int) *BlockMeta
-       LoadBlock(height int) *Block
-       LoadBlockPart(height int, index int) *Part
-
-       LoadBlockCommit(height int) *Commit
-       LoadSeenCommit(height int) *Commit
-}
-
-type BlockStore interface {
-       BlockStoreRPC
-       SaveBlock(block *Block, blockParts *PartSet, seenCommit *Commit)
-}
diff --git a/types/tx.go b/types/tx.go
deleted file mode 100644 (file)
index 0334452..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "errors"
-       "fmt"
-
-       abci "github.com/tendermint/abci/types"
-       "github.com/tendermint/go-wire/data"
-       "github.com/tendermint/tmlibs/merkle"
-)
-
-type Tx []byte
-
-// NOTE: this is the hash of the go-wire encoded Tx.
-// Tx has no types at this level, so just length-prefixed.
-// Alternatively, it may make sense to add types here and let
-// []byte be type 0x1 so we can have versioned txs if need be in the future.
-func (tx Tx) Hash() []byte {
-       return merkle.SimpleHashFromBinary(tx)
-}
-
-func (tx Tx) String() string {
-       return fmt.Sprintf("Tx{%X}", []byte(tx))
-}
-
-type Txs []Tx
-
-func (txs Txs) Hash() []byte {
-       // Recursive impl.
-       // Copied from tmlibs/merkle to avoid allocations
-       switch len(txs) {
-       case 0:
-               return nil
-       case 1:
-               return txs[0].Hash()
-       default:
-               left := Txs(txs[:(len(txs)+1)/2]).Hash()
-               right := Txs(txs[(len(txs)+1)/2:]).Hash()
-               return merkle.SimpleHashFromTwoHashes(left, right)
-       }
-}
-
-// Index returns the index of this transaction in the list, or -1 if not found
-func (txs Txs) Index(tx Tx) int {
-       for i := range txs {
-               if bytes.Equal(txs[i], tx) {
-                       return i
-               }
-       }
-       return -1
-}
-
-// Index returns the index of this transaction hash in the list, or -1 if not found
-func (txs Txs) IndexByHash(hash []byte) int {
-       for i := range txs {
-               if bytes.Equal(txs[i].Hash(), hash) {
-                       return i
-               }
-       }
-       return -1
-}
-
-// Proof returns a simple merkle proof for this node.
-//
-// Panics if i < 0 or i >= len(txs)
-//
-// TODO: optimize this!
-func (txs Txs) Proof(i int) TxProof {
-       l := len(txs)
-       hashables := make([]merkle.Hashable, l)
-       for i := 0; i < l; i++ {
-               hashables[i] = txs[i]
-       }
-       root, proofs := merkle.SimpleProofsFromHashables(hashables)
-
-       return TxProof{
-               Index:    i,
-               Total:    l,
-               RootHash: root,
-               Data:     txs[i],
-               Proof:    *proofs[i],
-       }
-}
-
-type TxProof struct {
-       Index, Total int
-       RootHash     data.Bytes
-       Data         Tx
-       Proof        merkle.SimpleProof
-}
-
-func (tp TxProof) LeafHash() []byte {
-       return tp.Data.Hash()
-}
-
-// Validate returns nil if it matches the dataHash, and is internally consistent
-// otherwise, returns a sensible error
-func (tp TxProof) Validate(dataHash []byte) error {
-       if !bytes.Equal(dataHash, tp.RootHash) {
-               return errors.New("Proof matches different data hash")
-       }
-
-       valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash)
-       if !valid {
-               return errors.New("Proof is not internally consistent")
-       }
-       return nil
-}
-
-// TxResult contains results of executing the transaction.
-//
-// One usage is indexing transaction results.
-type TxResult struct {
-       Height uint64                 `json:"height"`
-       Index  uint32                 `json:"index"`
-       Tx     Tx                     `json:"tx"`
-       Result abci.ResponseDeliverTx `json:"result"`
-}
diff --git a/types/tx_test.go b/types/tx_test.go
deleted file mode 100644 (file)
index 91cddec..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "testing"
-
-       "github.com/stretchr/testify/assert"
-       wire "github.com/tendermint/go-wire"
-       cmn "github.com/tendermint/tmlibs/common"
-       ctest "github.com/tendermint/tmlibs/test"
-)
-
-func makeTxs(cnt, size int) Txs {
-       txs := make(Txs, cnt)
-       for i := 0; i < cnt; i++ {
-               txs[i] = cmn.RandBytes(size)
-       }
-       return txs
-}
-
-func randInt(low, high int) int {
-       off := cmn.RandInt() % (high - low)
-       return low + off
-}
-
-func TestTxIndex(t *testing.T) {
-       assert := assert.New(t)
-       for i := 0; i < 20; i++ {
-               txs := makeTxs(15, 60)
-               for j := 0; j < len(txs); j++ {
-                       tx := txs[j]
-                       idx := txs.Index(tx)
-                       assert.Equal(j, idx)
-               }
-               assert.Equal(-1, txs.Index(nil))
-               assert.Equal(-1, txs.Index(Tx("foodnwkf")))
-       }
-}
-
-func TestValidTxProof(t *testing.T) {
-       assert := assert.New(t)
-       cases := []struct {
-               txs Txs
-       }{
-               {Txs{{1, 4, 34, 87, 163, 1}}},
-               {Txs{{5, 56, 165, 2}, {4, 77}}},
-               {Txs{Tx("foo"), Tx("bar"), Tx("baz")}},
-               {makeTxs(20, 5)},
-               {makeTxs(7, 81)},
-               {makeTxs(61, 15)},
-       }
-
-       for h, tc := range cases {
-               txs := tc.txs
-               root := txs.Hash()
-               // make sure valid proof for every tx
-               for i := range txs {
-                       leaf := txs[i]
-                       leafHash := leaf.Hash()
-                       proof := txs.Proof(i)
-                       assert.Equal(i, proof.Index, "%d: %d", h, i)
-                       assert.Equal(len(txs), proof.Total, "%d: %d", h, i)
-                       assert.EqualValues(root, proof.RootHash, "%d: %d", h, i)
-                       assert.EqualValues(leaf, proof.Data, "%d: %d", h, i)
-                       assert.EqualValues(leafHash, proof.LeafHash(), "%d: %d", h, i)
-                       assert.Nil(proof.Validate(root), "%d: %d", h, i)
-                       assert.NotNil(proof.Validate([]byte("foobar")), "%d: %d", h, i)
-
-                       // read-write must also work
-                       var p2 TxProof
-                       bin := wire.BinaryBytes(proof)
-                       err := wire.ReadBinaryBytes(bin, &p2)
-                       if assert.Nil(err, "%d: %d: %+v", h, i, err) {
-                               assert.Nil(p2.Validate(root), "%d: %d", h, i)
-                       }
-               }
-       }
-}
-
-func TestTxProofUnchangable(t *testing.T) {
-       // run the other test a bunch...
-       for i := 0; i < 40; i++ {
-               testTxProofUnchangable(t)
-       }
-}
-
-func testTxProofUnchangable(t *testing.T) {
-       assert := assert.New(t)
-
-       // make some proof
-       txs := makeTxs(randInt(2, 100), randInt(16, 128))
-       root := txs.Hash()
-       i := randInt(0, len(txs)-1)
-       proof := txs.Proof(i)
-
-       // make sure it is valid to start with
-       assert.Nil(proof.Validate(root))
-       bin := wire.BinaryBytes(proof)
-
-       // try mutating the data and make sure nothing breaks
-       for j := 0; j < 500; j++ {
-               bad := ctest.MutateByteSlice(bin)
-               if !bytes.Equal(bad, bin) {
-                       assertBadProof(t, root, bad, proof)
-               }
-       }
-}
-
-// this make sure the proof doesn't deserialize into something valid
-func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) {
-       var proof TxProof
-       err := wire.ReadBinaryBytes(bad, &proof)
-       if err == nil {
-               err = proof.Validate(root)
-               if err == nil {
-                       // okay, this can happen if we have a slightly different total
-                       // (where the path ends up the same), if it is something else, we have
-                       // a real problem
-                       assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good)
-               }
-       }
-}
diff --git a/types/validator.go b/types/validator.go
deleted file mode 100644 (file)
index 24f8974..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "fmt"
-       "io"
-
-       "github.com/tendermint/go-crypto"
-       "github.com/tendermint/go-wire"
-       "github.com/tendermint/go-wire/data"
-       cmn "github.com/tendermint/tmlibs/common"
-)
-
-// Volatile state for each Validator
-// NOTE: The Accum is not included in Validator.Hash();
-// make sure to update that method if changes are made here
-type Validator struct {
-       Address     data.Bytes    `json:"address"`
-       PubKey      crypto.PubKey `json:"pub_key"`
-       VotingPower int64         `json:"voting_power"`
-
-       Accum int64 `json:"accum"`
-}
-
-func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator {
-       return &Validator{
-               Address:     pubKey.Address(),
-               PubKey:      pubKey,
-               VotingPower: votingPower,
-               Accum:       0,
-       }
-}
-
-// Creates a new copy of the validator so we can mutate accum.
-// Panics if the validator is nil.
-func (v *Validator) Copy() *Validator {
-       vCopy := *v
-       return &vCopy
-}
-
-// Returns the one with higher Accum.
-func (v *Validator) CompareAccum(other *Validator) *Validator {
-       if v == nil {
-               return other
-       }
-       if v.Accum > other.Accum {
-               return v
-       } else if v.Accum < other.Accum {
-               return other
-       } else {
-               if bytes.Compare(v.Address, other.Address) < 0 {
-                       return v
-               } else if bytes.Compare(v.Address, other.Address) > 0 {
-                       return other
-               } else {
-                       cmn.PanicSanity("Cannot compare identical validators")
-                       return nil
-               }
-       }
-}
-
-func (v *Validator) String() string {
-       if v == nil {
-               return "nil-Validator"
-       }
-       return fmt.Sprintf("Validator{%v %v VP:%v A:%v}",
-               v.Address,
-               v.PubKey,
-               v.VotingPower,
-               v.Accum)
-}
-
-// Hash computes the unique ID of a validator with a given voting power.
-// It exludes the Accum value, which changes with every round.
-func (v *Validator) Hash() []byte {
-       return wire.BinaryRipemd160(struct {
-               Address     data.Bytes
-               PubKey      crypto.PubKey
-               VotingPower int64
-       }{
-               v.Address,
-               v.PubKey,
-               v.VotingPower,
-       })
-}
-
-//-------------------------------------
-
-var ValidatorCodec = validatorCodec{}
-
-type validatorCodec struct{}
-
-func (vc validatorCodec) Encode(o interface{}, w io.Writer, n *int, err *error) {
-       wire.WriteBinary(o.(*Validator), w, n, err)
-}
-
-func (vc validatorCodec) Decode(r io.Reader, n *int, err *error) interface{} {
-       return wire.ReadBinary(&Validator{}, r, 0, n, err)
-}
-
-func (vc validatorCodec) Compare(o1 interface{}, o2 interface{}) int {
-       cmn.PanicSanity("ValidatorCodec.Compare not implemented")
-       return 0
-}
-
-//--------------------------------------------------------------------------------
-// For testing...
-
-func RandValidator(randPower bool, minPower int64) (*Validator, *PrivValidator) {
-       privVal := GenPrivValidator()
-       _, tempFilePath := cmn.Tempfile("priv_validator_")
-       privVal.SetFile(tempFilePath)
-       votePower := minPower
-       if randPower {
-               votePower += int64(cmn.RandUint32())
-       }
-       val := NewValidator(privVal.PubKey, votePower)
-       return val, privVal
-}
diff --git a/types/validator_set.go b/types/validator_set.go
deleted file mode 100644 (file)
index b374df5..0000000
+++ /dev/null
@@ -1,383 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "fmt"
-       "sort"
-       "strings"
-
-       "github.com/tendermint/go-wire"
-       cmn "github.com/tendermint/tmlibs/common"
-       "github.com/tendermint/tmlibs/merkle"
-)
-
-// ValidatorSet represent a set of *Validator at a given height.
-// The validators can be fetched by address or index.
-// The index is in order of .Address, so the indices are fixed
-// for all rounds of a given blockchain height.
-// On the other hand, the .AccumPower of each validator and
-// the designated .GetProposer() of a set changes every round,
-// upon calling .IncrementAccum().
-// NOTE: Not goroutine-safe.
-// NOTE: All get/set to validators should copy the value for safety.
-// TODO: consider validator Accum overflow
-type ValidatorSet struct {
-       // NOTE: persisted via reflect, must be exported.
-       Validators []*Validator `json:"validators"`
-       Proposer   *Validator   `json:"proposer"`
-
-       // cached (unexported)
-       totalVotingPower int64
-}
-
-func NewValidatorSet(vals []*Validator) *ValidatorSet {
-       validators := make([]*Validator, len(vals))
-       for i, val := range vals {
-               validators[i] = val.Copy()
-       }
-       sort.Sort(ValidatorsByAddress(validators))
-       vs := &ValidatorSet{
-               Validators: validators,
-       }
-
-       if vals != nil {
-               vs.IncrementAccum(1)
-       }
-
-       return vs
-}
-
-// incrementAccum and update the proposer
-// TODO: mind the overflow when times and votingPower shares too large.
-func (valSet *ValidatorSet) IncrementAccum(times int) {
-       // Add VotingPower * times to each validator and order into heap.
-       validatorsHeap := cmn.NewHeap()
-       for _, val := range valSet.Validators {
-               val.Accum += int64(val.VotingPower) * int64(times) // TODO: mind overflow
-               validatorsHeap.Push(val, accumComparable{val})
-       }
-
-       // Decrement the validator with most accum times times
-       for i := 0; i < times; i++ {
-               mostest := validatorsHeap.Peek().(*Validator)
-               if i == times-1 {
-                       valSet.Proposer = mostest
-               }
-               mostest.Accum -= int64(valSet.TotalVotingPower())
-               validatorsHeap.Update(mostest, accumComparable{mostest})
-       }
-}
-
-func (valSet *ValidatorSet) Copy() *ValidatorSet {
-       validators := make([]*Validator, len(valSet.Validators))
-       for i, val := range valSet.Validators {
-               // NOTE: must copy, since IncrementAccum updates in place.
-               validators[i] = val.Copy()
-       }
-       return &ValidatorSet{
-               Validators:       validators,
-               Proposer:         valSet.Proposer,
-               totalVotingPower: valSet.totalVotingPower,
-       }
-}
-
-func (valSet *ValidatorSet) HasAddress(address []byte) bool {
-       idx := sort.Search(len(valSet.Validators), func(i int) bool {
-               return bytes.Compare(address, valSet.Validators[i].Address) <= 0
-       })
-       return idx != len(valSet.Validators) && bytes.Compare(valSet.Validators[idx].Address, address) == 0
-}
-
-func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
-       idx := sort.Search(len(valSet.Validators), func(i int) bool {
-               return bytes.Compare(address, valSet.Validators[i].Address) <= 0
-       })
-       if idx != len(valSet.Validators) && bytes.Compare(valSet.Validators[idx].Address, address) == 0 {
-               return idx, valSet.Validators[idx].Copy()
-       } else {
-               return 0, nil
-       }
-}
-
-func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
-       val = valSet.Validators[index]
-       return val.Address, val.Copy()
-}
-
-func (valSet *ValidatorSet) Size() int {
-       return len(valSet.Validators)
-}
-
-func (valSet *ValidatorSet) TotalVotingPower() int64 {
-       if valSet.totalVotingPower == 0 {
-               for _, val := range valSet.Validators {
-                       valSet.totalVotingPower += val.VotingPower
-               }
-       }
-       return valSet.totalVotingPower
-}
-
-func (valSet *ValidatorSet) GetProposer() (proposer *Validator) {
-       if len(valSet.Validators) == 0 {
-               return nil
-       }
-       if valSet.Proposer == nil {
-               valSet.Proposer = valSet.findProposer()
-       }
-       return valSet.Proposer.Copy()
-}
-
-func (valSet *ValidatorSet) findProposer() *Validator {
-       var proposer *Validator
-       for _, val := range valSet.Validators {
-               if proposer == nil || !bytes.Equal(val.Address, proposer.Address) {
-                       proposer = proposer.CompareAccum(val)
-               }
-       }
-       return proposer
-}
-
-func (valSet *ValidatorSet) Hash() []byte {
-       if len(valSet.Validators) == 0 {
-               return nil
-       }
-       hashables := make([]merkle.Hashable, len(valSet.Validators))
-       for i, val := range valSet.Validators {
-               hashables[i] = val
-       }
-       return merkle.SimpleHashFromHashables(hashables)
-}
-
-func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
-       val = val.Copy()
-       idx := sort.Search(len(valSet.Validators), func(i int) bool {
-               return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
-       })
-       if idx == len(valSet.Validators) {
-               valSet.Validators = append(valSet.Validators, val)
-               // Invalidate cache
-               valSet.Proposer = nil
-               valSet.totalVotingPower = 0
-               return true
-       } else if bytes.Compare(valSet.Validators[idx].Address, val.Address) == 0 {
-               return false
-       } else {
-               newValidators := make([]*Validator, len(valSet.Validators)+1)
-               copy(newValidators[:idx], valSet.Validators[:idx])
-               newValidators[idx] = val
-               copy(newValidators[idx+1:], valSet.Validators[idx:])
-               valSet.Validators = newValidators
-               // Invalidate cache
-               valSet.Proposer = nil
-               valSet.totalVotingPower = 0
-               return true
-       }
-}
-
-func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
-       index, sameVal := valSet.GetByAddress(val.Address)
-       if sameVal == nil {
-               return false
-       } else {
-               valSet.Validators[index] = val.Copy()
-               // Invalidate cache
-               valSet.Proposer = nil
-               valSet.totalVotingPower = 0
-               return true
-       }
-}
-
-func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
-       idx := sort.Search(len(valSet.Validators), func(i int) bool {
-               return bytes.Compare(address, valSet.Validators[i].Address) <= 0
-       })
-       if idx == len(valSet.Validators) || bytes.Compare(valSet.Validators[idx].Address, address) != 0 {
-               return nil, false
-       } else {
-               removedVal := valSet.Validators[idx]
-               newValidators := valSet.Validators[:idx]
-               if idx+1 < len(valSet.Validators) {
-                       newValidators = append(newValidators, valSet.Validators[idx+1:]...)
-               }
-               valSet.Validators = newValidators
-               // Invalidate cache
-               valSet.Proposer = nil
-               valSet.totalVotingPower = 0
-               return removedVal, true
-       }
-}
-
-func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
-       for i, val := range valSet.Validators {
-               stop := fn(i, val.Copy())
-               if stop {
-                       break
-               }
-       }
-}
-
-// Verify that +2/3 of the set had signed the given signBytes
-func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int, commit *Commit) error {
-       if valSet.Size() != len(commit.Precommits) {
-               return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits))
-       }
-       if height != commit.Height() {
-               return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
-       }
-
-       talliedVotingPower := int64(0)
-       round := commit.Round()
-
-       for idx, precommit := range commit.Precommits {
-               // may be nil if validator skipped.
-               if precommit == nil {
-                       continue
-               }
-               if precommit.Height != height {
-                       return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height)
-               }
-               if precommit.Round != round {
-                       return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
-               }
-               if precommit.Type != VoteTypePrecommit {
-                       return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx)
-               }
-               _, val := valSet.GetByIndex(idx)
-               // Validate signature
-               precommitSignBytes := SignBytes(chainID, precommit)
-               if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
-                       return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
-               }
-               if !blockID.Equals(precommit.BlockID) {
-                       continue // Not an error, but doesn't count
-               }
-               // Good precommit!
-               talliedVotingPower += val.VotingPower
-       }
-
-       if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
-               return nil
-       } else {
-               return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
-                       talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
-       }
-}
-
-// Verify that +2/3 of this set had signed the given signBytes.
-// Unlike VerifyCommit(), this function can verify commits with differeent sets.
-func (valSet *ValidatorSet) VerifyCommitAny(chainID string, blockID BlockID, height int, commit *Commit) error {
-       panic("Not yet implemented")
-       /*
-                       Start like:
-
-               FOR_LOOP:
-                       for _, val := range vals {
-                               if len(precommits) == 0 {
-                                       break FOR_LOOP
-                               }
-                               next := precommits[0]
-                               switch bytes.Compare(val.Address(), next.ValidatorAddress) {
-                               case -1:
-                                       continue FOR_LOOP
-                               case 0:
-                                       signBytes := tm.SignBytes(next)
-                                       ...
-                               case 1:
-                                       ... // error?
-                               }
-                       }
-       */
-}
-
-func (valSet *ValidatorSet) ToBytes() []byte {
-       buf, n, err := new(bytes.Buffer), new(int), new(error)
-       wire.WriteBinary(valSet, buf, n, err)
-       if *err != nil {
-               cmn.PanicCrisis(*err)
-       }
-       return buf.Bytes()
-}
-
-func (valSet *ValidatorSet) FromBytes(b []byte) {
-       r, n, err := bytes.NewReader(b), new(int), new(error)
-       wire.ReadBinary(valSet, r, 0, n, err)
-       if *err != nil {
-               // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
-               cmn.PanicCrisis(*err)
-       }
-}
-
-func (valSet *ValidatorSet) String() string {
-       return valSet.StringIndented("")
-}
-
-func (valSet *ValidatorSet) StringIndented(indent string) string {
-       if valSet == nil {
-               return "nil-ValidatorSet"
-       }
-       valStrings := []string{}
-       valSet.Iterate(func(index int, val *Validator) bool {
-               valStrings = append(valStrings, val.String())
-               return false
-       })
-       return fmt.Sprintf(`ValidatorSet{
-%s  Proposer: %v
-%s  Validators:
-%s    %v
-%s}`,
-               indent, valSet.GetProposer().String(),
-               indent,
-               indent, strings.Join(valStrings, "\n"+indent+"    "),
-               indent)
-
-}
-
-//-------------------------------------
-// Implements sort for sorting validators by address.
-
-type ValidatorsByAddress []*Validator
-
-func (vs ValidatorsByAddress) Len() int {
-       return len(vs)
-}
-
-func (vs ValidatorsByAddress) Less(i, j int) bool {
-       return bytes.Compare(vs[i].Address, vs[j].Address) == -1
-}
-
-func (vs ValidatorsByAddress) Swap(i, j int) {
-       it := vs[i]
-       vs[i] = vs[j]
-       vs[j] = it
-}
-
-//-------------------------------------
-// Use with Heap for sorting validators by accum
-
-type accumComparable struct {
-       *Validator
-}
-
-// We want to find the validator with the greatest accum.
-func (ac accumComparable) Less(o interface{}) bool {
-       other := o.(accumComparable).Validator
-       larger := ac.CompareAccum(other)
-       return bytes.Equal(larger.Address, ac.Address)
-}
-
-//----------------------------------------
-// For testing
-
-// NOTE: PrivValidator are in order.
-func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []*PrivValidator) {
-       vals := make([]*Validator, numValidators)
-       privValidators := make([]*PrivValidator, numValidators)
-       for i := 0; i < numValidators; i++ {
-               val, privValidator := RandValidator(false, votingPower)
-               vals[i] = val
-               privValidators[i] = privValidator
-       }
-       valSet := NewValidatorSet(vals)
-       sort.Sort(PrivValidatorsByAddress(privValidators))
-       return valSet, privValidators
-}
diff --git a/types/validator_set_test.go b/types/validator_set_test.go
deleted file mode 100644 (file)
index 71a1993..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "strings"
-       "testing"
-
-       cmn "github.com/tendermint/tmlibs/common"
-       "github.com/tendermint/go-crypto"
-)
-
-func randPubKey() crypto.PubKey {
-       var pubKey [32]byte
-       copy(pubKey[:], cmn.RandBytes(32))
-       return crypto.PubKeyEd25519(pubKey).Wrap()
-}
-
-func randValidator_() *Validator {
-       val := NewValidator(randPubKey(), cmn.RandInt64())
-       val.Accum = cmn.RandInt64()
-       return val
-}
-
-func randValidatorSet(numValidators int) *ValidatorSet {
-       validators := make([]*Validator, numValidators)
-       for i := 0; i < numValidators; i++ {
-               validators[i] = randValidator_()
-       }
-       return NewValidatorSet(validators)
-}
-
-func TestCopy(t *testing.T) {
-       vset := randValidatorSet(10)
-       vsetHash := vset.Hash()
-       if len(vsetHash) == 0 {
-               t.Fatalf("ValidatorSet had unexpected zero hash")
-       }
-
-       vsetCopy := vset.Copy()
-       vsetCopyHash := vsetCopy.Hash()
-
-       if !bytes.Equal(vsetHash, vsetCopyHash) {
-               t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash)
-       }
-}
-
-func TestProposerSelection1(t *testing.T) {
-       vset := NewValidatorSet([]*Validator{
-               newValidator([]byte("foo"), 1000),
-               newValidator([]byte("bar"), 300),
-               newValidator([]byte("baz"), 330),
-       })
-       proposers := []string{}
-       for i := 0; i < 99; i++ {
-               val := vset.GetProposer()
-               proposers = append(proposers, string(val.Address))
-               vset.IncrementAccum(1)
-       }
-       expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo`
-       if expected != strings.Join(proposers, " ") {
-               t.Errorf("Expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " "))
-       }
-}
-
-func newValidator(address []byte, power int64) *Validator {
-       return &Validator{Address: address, VotingPower: power}
-}
-
-func TestProposerSelection2(t *testing.T) {
-       addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-       addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
-       addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}
-
-       // when all voting power is same, we go in order of addresses
-       val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100)
-       valList := []*Validator{val0, val1, val2}
-       vals := NewValidatorSet(valList)
-       for i := 0; i < len(valList)*5; i++ {
-               ii := (i) % len(valList)
-               prop := vals.GetProposer()
-               if !bytes.Equal(prop.Address, valList[ii].Address) {
-                       t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address)
-               }
-               vals.IncrementAccum(1)
-       }
-
-       // One validator has more than the others, but not enough to propose twice in a row
-       *val2 = *newValidator(addr2, 400)
-       vals = NewValidatorSet(valList)
-       // vals.IncrementAccum(1)
-       prop := vals.GetProposer()
-       if !bytes.Equal(prop.Address, addr2) {
-               t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
-       }
-       vals.IncrementAccum(1)
-       prop = vals.GetProposer()
-       if !bytes.Equal(prop.Address, addr0) {
-               t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address)
-       }
-
-       // One validator has more than the others, and enough to be proposer twice in a row
-       *val2 = *newValidator(addr2, 401)
-       vals = NewValidatorSet(valList)
-       prop = vals.GetProposer()
-       if !bytes.Equal(prop.Address, addr2) {
-               t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
-       }
-       vals.IncrementAccum(1)
-       prop = vals.GetProposer()
-       if !bytes.Equal(prop.Address, addr2) {
-               t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address)
-       }
-       vals.IncrementAccum(1)
-       prop = vals.GetProposer()
-       if !bytes.Equal(prop.Address, addr0) {
-               t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address)
-       }
-
-       // each validator should be the proposer a proportional number of times
-       val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3)
-       valList = []*Validator{val0, val1, val2}
-       propCount := make([]int, 3)
-       vals = NewValidatorSet(valList)
-       N := 1
-       for i := 0; i < 120*N; i++ {
-               prop := vals.GetProposer()
-               ii := prop.Address[19]
-               propCount[ii] += 1
-               vals.IncrementAccum(1)
-       }
-
-       if propCount[0] != 40*N {
-               t.Fatalf("Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d", 40*N, 120*N, propCount[0], 120*N)
-       }
-       if propCount[1] != 50*N {
-               t.Fatalf("Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d", 50*N, 120*N, propCount[1], 120*N)
-       }
-       if propCount[2] != 30*N {
-               t.Fatalf("Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d", 30*N, 120*N, propCount[2], 120*N)
-       }
-}
-
-func TestProposerSelection3(t *testing.T) {
-       vset := NewValidatorSet([]*Validator{
-               newValidator([]byte("a"), 1),
-               newValidator([]byte("b"), 1),
-               newValidator([]byte("c"), 1),
-               newValidator([]byte("d"), 1),
-       })
-
-       proposerOrder := make([]*Validator, 4)
-       for i := 0; i < 4; i++ {
-               proposerOrder[i] = vset.GetProposer()
-               vset.IncrementAccum(1)
-       }
-
-       // i for the loop
-       // j for the times
-       // we should go in order for ever, despite some IncrementAccums with times > 1
-       var i, j int
-       for ; i < 10000; i++ {
-               got := vset.GetProposer().Address
-               expected := proposerOrder[j%4].Address
-               if !bytes.Equal(got, expected) {
-                       t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j))
-               }
-
-               // serialize, deserialize, check proposer
-               b := vset.ToBytes()
-               vset.FromBytes(b)
-
-               computed := vset.GetProposer() // findGetProposer()
-               if i != 0 {
-                       if !bytes.Equal(got, computed.Address) {
-                               t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j))
-                       }
-               }
-
-               // times is usually 1
-               times := 1
-               mod := (cmn.RandInt() % 5) + 1
-               if cmn.RandInt()%mod > 0 {
-                       // sometimes its up to 5
-                       times = cmn.RandInt() % 5
-               }
-               vset.IncrementAccum(times)
-
-               j += times
-       }
-}
-
-func BenchmarkValidatorSetCopy(b *testing.B) {
-       b.StopTimer()
-       vset := NewValidatorSet([]*Validator{})
-       for i := 0; i < 1000; i++ {
-               privKey := crypto.GenPrivKeyEd25519()
-               pubKey := privKey.PubKey()
-               val := NewValidator(pubKey, 0)
-               if !vset.Add(val) {
-                       panic("Failed to add validator")
-               }
-       }
-       b.StartTimer()
-
-       for i := 0; i < b.N; i++ {
-               vset.Copy()
-       }
-}
diff --git a/types/vote.go b/types/vote.go
deleted file mode 100644 (file)
index 164293c..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-package types
-
-import (
-       "errors"
-       "fmt"
-       "io"
-
-       "github.com/tendermint/go-crypto"
-       "github.com/tendermint/go-wire"
-       "github.com/tendermint/go-wire/data"
-       cmn "github.com/tendermint/tmlibs/common"
-)
-
-var (
-       ErrVoteUnexpectedStep          = errors.New("Unexpected step")
-       ErrVoteInvalidValidatorIndex   = errors.New("Invalid round vote validator index")
-       ErrVoteInvalidValidatorAddress = errors.New("Invalid round vote validator address")
-       ErrVoteInvalidSignature        = errors.New("Invalid round vote signature")
-       ErrVoteInvalidBlockHash        = errors.New("Invalid block hash")
-)
-
-type ErrVoteConflictingVotes struct {
-       VoteA *Vote
-       VoteB *Vote
-}
-
-func (err *ErrVoteConflictingVotes) Error() string {
-       return "Conflicting votes"
-}
-
-// Types of votes
-// TODO Make a new type "VoteType"
-const (
-       VoteTypePrevote   = byte(0x01)
-       VoteTypePrecommit = byte(0x02)
-)
-
-func IsVoteTypeValid(type_ byte) bool {
-       switch type_ {
-       case VoteTypePrevote:
-               return true
-       case VoteTypePrecommit:
-               return true
-       default:
-               return false
-       }
-}
-
-// Represents a prevote, precommit, or commit vote from validators for consensus.
-type Vote struct {
-       ValidatorAddress data.Bytes       `json:"validator_address"`
-       ValidatorIndex   int              `json:"validator_index"`
-       Height           int              `json:"height"`
-       Round            int              `json:"round"`
-       Type             byte             `json:"type"`
-       BlockID          BlockID          `json:"block_id"` // zero if vote is nil.
-       Signature        crypto.Signature `json:"signature"`
-}
-
-func (vote *Vote) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
-       wire.WriteJSON(CanonicalJSONOnceVote{
-               chainID,
-               CanonicalVote(vote),
-       }, w, n, err)
-}
-
-func (vote *Vote) Copy() *Vote {
-       voteCopy := *vote
-       return &voteCopy
-}
-
-func (vote *Vote) String() string {
-       if vote == nil {
-               return "nil-Vote"
-       }
-       var typeString string
-       switch vote.Type {
-       case VoteTypePrevote:
-               typeString = "Prevote"
-       case VoteTypePrecommit:
-               typeString = "Precommit"
-       default:
-               cmn.PanicSanity("Unknown vote type")
-       }
-
-       return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %v}",
-               vote.ValidatorIndex, cmn.Fingerprint(vote.ValidatorAddress),
-               vote.Height, vote.Round, vote.Type, typeString,
-               cmn.Fingerprint(vote.BlockID.Hash), vote.Signature)
-}
diff --git a/types/vote_set.go b/types/vote_set.go
deleted file mode 100644 (file)
index 938dbcb..0000000
+++ /dev/null
@@ -1,527 +0,0 @@
-package types
-
-import (
-       "bytes"
-       "fmt"
-       "strings"
-       "sync"
-
-       . "github.com/tendermint/tmlibs/common"
-)
-
-/*
-       VoteSet helps collect signatures from validators at each height+round for a
-       predefined vote type.
-
-       We need VoteSet to be able to keep track of conflicting votes when validators
-       double-sign.  Yet, we can't keep track of *all* the votes seen, as that could
-       be a DoS attack vector.
-
-       There are two storage areas for votes.
-       1. voteSet.votes
-       2. voteSet.votesByBlock
-
-       `.votes` is the "canonical" list of votes.  It always has at least one vote,
-       if a vote from a validator had been seen at all.  Usually it keeps track of
-       the first vote seen, but when a 2/3 majority is found, votes for that get
-       priority and are copied over from `.votesByBlock`.
-
-       `.votesByBlock` keeps track of a list of votes for a particular block.  There
-       are two ways a &blockVotes{} gets created in `.votesByBlock`.
-       1. the first vote seen by a validator was for the particular block.
-       2. a peer claims to have seen 2/3 majority for the particular block.
-
-       Since the first vote from a validator will always get added in `.votesByBlock`
-       , all votes in `.votes` will have a corresponding entry in `.votesByBlock`.
-
-       When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its
-       votes are copied into `.votes`.
-
-       All this is memory bounded because conflicting votes only get added if a peer
-       told us to track that block, each peer only gets to tell us 1 such block, and,
-       there's only a limited number of peers.
-
-       NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64.
-*/
-type VoteSet struct {
-       chainID string
-       height  int
-       round   int
-       type_   byte
-
-       mtx           sync.Mutex
-       valSet        *ValidatorSet
-       votesBitArray *BitArray
-       votes         []*Vote                // Primary votes to share
-       sum           int64                  // Sum of voting power for seen votes, discounting conflicts
-       maj23         *BlockID               // First 2/3 majority seen
-       votesByBlock  map[string]*blockVotes // string(blockHash|blockParts) -> blockVotes
-       peerMaj23s    map[string]BlockID     // Maj23 for each peer
-}
-
-// Constructs a new VoteSet struct used to accumulate votes for given height/round.
-func NewVoteSet(chainID string, height int, round int, type_ byte, valSet *ValidatorSet) *VoteSet {
-       if height == 0 {
-               PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.")
-       }
-       return &VoteSet{
-               chainID:       chainID,
-               height:        height,
-               round:         round,
-               type_:         type_,
-               valSet:        valSet,
-               votesBitArray: NewBitArray(valSet.Size()),
-               votes:         make([]*Vote, valSet.Size()),
-               sum:           0,
-               maj23:         nil,
-               votesByBlock:  make(map[string]*blockVotes, valSet.Size()),
-               peerMaj23s:    make(map[string]BlockID),
-       }
-}
-
-func (voteSet *VoteSet) ChainID() string {
-       return voteSet.chainID
-}
-
-func (voteSet *VoteSet) Height() int {
-       if voteSet == nil {
-               return 0
-       } else {
-               return voteSet.height
-       }
-}
-
-func (voteSet *VoteSet) Round() int {
-       if voteSet == nil {
-               return -1
-       } else {
-               return voteSet.round
-       }
-}
-
-func (voteSet *VoteSet) Type() byte {
-       if voteSet == nil {
-               return 0x00
-       } else {
-               return voteSet.type_
-       }
-}
-
-func (voteSet *VoteSet) Size() int {
-       if voteSet == nil {
-               return 0
-       } else {
-               return voteSet.valSet.Size()
-       }
-}
-
-// Returns added=true if vote is valid and new.
-// Otherwise returns err=ErrVote[
-//             UnexpectedStep | InvalidIndex | InvalidAddress |
-//             InvalidSignature | InvalidBlockHash | ConflictingVotes ]
-// Duplicate votes return added=false, err=nil.
-// Conflicting votes return added=*, err=ErrVoteConflictingVotes.
-// NOTE: vote should not be mutated after adding.
-// NOTE: VoteSet must not be nil
-func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) {
-       if voteSet == nil {
-               PanicSanity("AddVote() on nil VoteSet")
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-
-       return voteSet.addVote(vote)
-}
-
-// NOTE: Validates as much as possible before attempting to verify the signature.
-func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
-       valIndex := vote.ValidatorIndex
-       valAddr := vote.ValidatorAddress
-       blockKey := vote.BlockID.Key()
-
-       // Ensure that validator index was set
-       if valIndex < 0 || len(valAddr) == 0 {
-               panic("Validator index or address was not set in vote.")
-       }
-
-       // Make sure the step matches.
-       if (vote.Height != voteSet.height) ||
-               (vote.Round != voteSet.round) ||
-               (vote.Type != voteSet.type_) {
-               return false, ErrVoteUnexpectedStep
-       }
-
-       // Ensure that signer is a validator.
-       lookupAddr, val := voteSet.valSet.GetByIndex(valIndex)
-       if val == nil {
-               return false, ErrVoteInvalidValidatorIndex
-       }
-
-       // Ensure that the signer has the right address
-       if !bytes.Equal(valAddr, lookupAddr) {
-               return false, ErrVoteInvalidValidatorAddress
-       }
-
-       // If we already know of this vote, return false.
-       if existing, ok := voteSet.getVote(valIndex, blockKey); ok {
-               if existing.Signature.Equals(vote.Signature) {
-                       return false, nil // duplicate
-               } else {
-                       return false, ErrVoteInvalidSignature // NOTE: assumes deterministic signatures
-               }
-       }
-
-       // Check signature.
-       if !val.PubKey.VerifyBytes(SignBytes(voteSet.chainID, vote), vote.Signature) {
-               // Bad signature.
-               return false, ErrVoteInvalidSignature
-       }
-
-       // Add vote and get conflicting vote if any
-       added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower)
-       if conflicting != nil {
-               return added, &ErrVoteConflictingVotes{
-                       VoteA: conflicting,
-                       VoteB: vote,
-               }
-       } else {
-               if !added {
-                       PanicSanity("Expected to add non-conflicting vote")
-               }
-               return added, nil
-       }
-
-}
-
-// Returns (vote, true) if vote exists for valIndex and blockKey
-func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) {
-       if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey {
-               return existing, true
-       }
-       if existing := voteSet.votesByBlock[blockKey].getByIndex(valIndex); existing != nil {
-               return existing, true
-       }
-       return nil, false
-}
-
-// Assumes signature is valid.
-// If conflicting vote exists, returns it.
-func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower int64) (added bool, conflicting *Vote) {
-       valIndex := vote.ValidatorIndex
-
-       // Already exists in voteSet.votes?
-       if existing := voteSet.votes[valIndex]; existing != nil {
-               if existing.BlockID.Equals(vote.BlockID) {
-                       PanicSanity("addVerifiedVote does not expect duplicate votes")
-               } else {
-                       conflicting = existing
-               }
-               // Replace vote if blockKey matches voteSet.maj23.
-               if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey {
-                       voteSet.votes[valIndex] = vote
-                       voteSet.votesBitArray.SetIndex(valIndex, true)
-               }
-               // Otherwise don't add it to voteSet.votes
-       } else {
-               // Add to voteSet.votes and incr .sum
-               voteSet.votes[valIndex] = vote
-               voteSet.votesBitArray.SetIndex(valIndex, true)
-               voteSet.sum += votingPower
-       }
-
-       votesByBlock, ok := voteSet.votesByBlock[blockKey]
-       if ok {
-               if conflicting != nil && !votesByBlock.peerMaj23 {
-                       // There's a conflict and no peer claims that this block is special.
-                       return false, conflicting
-               }
-               // We'll add the vote in a bit.
-       } else {
-               // .votesByBlock doesn't exist...
-               if conflicting != nil {
-                       // ... and there's a conflicting vote.
-                       // We're not even tracking this blockKey, so just forget it.
-                       return false, conflicting
-               } else {
-                       // ... and there's no conflicting vote.
-                       // Start tracking this blockKey
-                       votesByBlock = newBlockVotes(false, voteSet.valSet.Size())
-                       voteSet.votesByBlock[blockKey] = votesByBlock
-                       // We'll add the vote in a bit.
-               }
-       }
-
-       // Before adding to votesByBlock, see if we'll exceed quorum
-       origSum := votesByBlock.sum
-       quorum := voteSet.valSet.TotalVotingPower()*2/3 + 1
-
-       // Add vote to votesByBlock
-       votesByBlock.addVerifiedVote(vote, votingPower)
-
-       // If we just crossed the quorum threshold and have 2/3 majority...
-       if origSum < quorum && quorum <= votesByBlock.sum {
-               // Only consider the first quorum reached
-               if voteSet.maj23 == nil {
-                       maj23BlockID := vote.BlockID
-                       voteSet.maj23 = &maj23BlockID
-                       // And also copy votes over to voteSet.votes
-                       for i, vote := range votesByBlock.votes {
-                               if vote != nil {
-                                       voteSet.votes[i] = vote
-                               }
-                       }
-               }
-       }
-
-       return true, conflicting
-}
-
-// If a peer claims that it has 2/3 majority for given blockKey, call this.
-// NOTE: if there are too many peers, or too much peer churn,
-// this can cause memory issues.
-// TODO: implement ability to remove peers too
-// NOTE: VoteSet must not be nil
-func (voteSet *VoteSet) SetPeerMaj23(peerID string, blockID BlockID) {
-       if voteSet == nil {
-               PanicSanity("SetPeerMaj23() on nil VoteSet")
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-
-       blockKey := blockID.Key()
-
-       // Make sure peer hasn't already told us something.
-       if existing, ok := voteSet.peerMaj23s[peerID]; ok {
-               if existing.Equals(blockID) {
-                       return // Nothing to do
-               } else {
-                       return // TODO bad peer!
-               }
-       }
-       voteSet.peerMaj23s[peerID] = blockID
-
-       // Create .votesByBlock entry if needed.
-       votesByBlock, ok := voteSet.votesByBlock[blockKey]
-       if ok {
-               if votesByBlock.peerMaj23 {
-                       return // Nothing to do
-               } else {
-                       votesByBlock.peerMaj23 = true
-                       // No need to copy votes, already there.
-               }
-       } else {
-               votesByBlock = newBlockVotes(true, voteSet.valSet.Size())
-               voteSet.votesByBlock[blockKey] = votesByBlock
-               // No need to copy votes, no votes to copy over.
-       }
-}
-
-func (voteSet *VoteSet) BitArray() *BitArray {
-       if voteSet == nil {
-               return nil
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       return voteSet.votesBitArray.Copy()
-}
-
-func (voteSet *VoteSet) BitArrayByBlockID(blockID BlockID) *BitArray {
-       if voteSet == nil {
-               return nil
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       votesByBlock, ok := voteSet.votesByBlock[blockID.Key()]
-       if ok {
-               return votesByBlock.bitArray.Copy()
-       }
-       return nil
-}
-
-// NOTE: if validator has conflicting votes, returns "canonical" vote
-func (voteSet *VoteSet) GetByIndex(valIndex int) *Vote {
-       if voteSet == nil {
-               return nil
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       return voteSet.votes[valIndex]
-}
-
-func (voteSet *VoteSet) GetByAddress(address []byte) *Vote {
-       if voteSet == nil {
-               return nil
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       valIndex, val := voteSet.valSet.GetByAddress(address)
-       if val == nil {
-               PanicSanity("GetByAddress(address) returned nil")
-       }
-       return voteSet.votes[valIndex]
-}
-
-func (voteSet *VoteSet) HasTwoThirdsMajority() bool {
-       if voteSet == nil {
-               return false
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       return voteSet.maj23 != nil
-}
-
-func (voteSet *VoteSet) IsCommit() bool {
-       if voteSet == nil {
-               return false
-       }
-       if voteSet.type_ != VoteTypePrecommit {
-               return false
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       return voteSet.maj23 != nil
-}
-
-func (voteSet *VoteSet) HasTwoThirdsAny() bool {
-       if voteSet == nil {
-               return false
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       return voteSet.sum > voteSet.valSet.TotalVotingPower()*2/3
-}
-
-func (voteSet *VoteSet) HasAll() bool {
-       return voteSet.sum == voteSet.valSet.TotalVotingPower()
-}
-
-// Returns either a blockhash (or nil) that received +2/3 majority.
-// If there exists no such majority, returns (nil, PartSetHeader{}, false).
-func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) {
-       if voteSet == nil {
-               return BlockID{}, false
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       if voteSet.maj23 != nil {
-               return *voteSet.maj23, true
-       } else {
-               return BlockID{}, false
-       }
-}
-
-func (voteSet *VoteSet) String() string {
-       if voteSet == nil {
-               return "nil-VoteSet"
-       }
-       return voteSet.StringIndented("")
-}
-
-func (voteSet *VoteSet) StringIndented(indent string) string {
-       voteStrings := make([]string, len(voteSet.votes))
-       for i, vote := range voteSet.votes {
-               if vote == nil {
-                       voteStrings[i] = "nil-Vote"
-               } else {
-                       voteStrings[i] = vote.String()
-               }
-       }
-       return fmt.Sprintf(`VoteSet{
-%s  H:%v R:%v T:%v
-%s  %v
-%s  %v
-%s  %v
-%s}`,
-               indent, voteSet.height, voteSet.round, voteSet.type_,
-               indent, strings.Join(voteStrings, "\n"+indent+"  "),
-               indent, voteSet.votesBitArray,
-               indent, voteSet.peerMaj23s,
-               indent)
-}
-
-func (voteSet *VoteSet) StringShort() string {
-       if voteSet == nil {
-               return "nil-VoteSet"
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-       return fmt.Sprintf(`VoteSet{H:%v R:%v T:%v +2/3:%v %v %v}`,
-               voteSet.height, voteSet.round, voteSet.type_, voteSet.maj23, voteSet.votesBitArray, voteSet.peerMaj23s)
-}
-
-//--------------------------------------------------------------------------------
-// Commit
-
-func (voteSet *VoteSet) MakeCommit() *Commit {
-       if voteSet.type_ != VoteTypePrecommit {
-               PanicSanity("Cannot MakeCommit() unless VoteSet.Type is VoteTypePrecommit")
-       }
-       voteSet.mtx.Lock()
-       defer voteSet.mtx.Unlock()
-
-       // Make sure we have a 2/3 majority
-       if voteSet.maj23 == nil {
-               PanicSanity("Cannot MakeCommit() unless a blockhash has +2/3")
-       }
-
-       // For every validator, get the precommit
-       votesCopy := make([]*Vote, len(voteSet.votes))
-       copy(votesCopy, voteSet.votes)
-       return &Commit{
-               BlockID:    *voteSet.maj23,
-               Precommits: votesCopy,
-       }
-}
-
-//--------------------------------------------------------------------------------
-
-/*
-       Votes for a particular block
-       There are two ways a *blockVotes gets created for a blockKey.
-       1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false)
-       2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true)
-*/
-type blockVotes struct {
-       peerMaj23 bool      // peer claims to have maj23
-       bitArray  *BitArray // valIndex -> hasVote?
-       votes     []*Vote   // valIndex -> *Vote
-       sum       int64     // vote sum
-}
-
-func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes {
-       return &blockVotes{
-               peerMaj23: peerMaj23,
-               bitArray:  NewBitArray(numValidators),
-               votes:     make([]*Vote, numValidators),
-               sum:       0,
-       }
-}
-
-func (vs *blockVotes) addVerifiedVote(vote *Vote, votingPower int64) {
-       valIndex := vote.ValidatorIndex
-       if existing := vs.votes[valIndex]; existing == nil {
-               vs.bitArray.SetIndex(valIndex, true)
-               vs.votes[valIndex] = vote
-               vs.sum += votingPower
-       }
-}
-
-func (vs *blockVotes) getByIndex(index int) *Vote {
-       if vs == nil {
-               return nil
-       }
-       return vs.votes[index]
-}
-
-//--------------------------------------------------------------------------------
-
-// Common interface between *consensus.VoteSet and types.Commit
-type VoteSetReader interface {
-       Height() int
-       Round() int
-       Type() byte
-       Size() int
-       BitArray() *BitArray
-       GetByIndex(int) *Vote
-       IsCommit() bool
-}
diff --git a/types/vote_set_test.go b/types/vote_set_test.go
deleted file mode 100644 (file)
index 84e13ac..0000000
+++ /dev/null
@@ -1,471 +0,0 @@
-package types
-
-import (
-       "bytes"
-
-       "github.com/tendermint/go-crypto"
-       . "github.com/tendermint/tmlibs/common"
-       . "github.com/tendermint/tmlibs/test"
-
-       "testing"
-)
-
-// NOTE: privValidators are in order
-func randVoteSet(height int, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []*PrivValidator) {
-       valSet, privValidators := RandValidatorSet(numValidators, votingPower)
-       return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators
-}
-
-// Convenience: Return new vote with different validator address/index
-func withValidator(vote *Vote, addr []byte, idx int) *Vote {
-       vote = vote.Copy()
-       vote.ValidatorAddress = addr
-       vote.ValidatorIndex = idx
-       return vote
-}
-
-// Convenience: Return new vote with different height
-func withHeight(vote *Vote, height int) *Vote {
-       vote = vote.Copy()
-       vote.Height = height
-       return vote
-}
-
-// Convenience: Return new vote with different round
-func withRound(vote *Vote, round int) *Vote {
-       vote = vote.Copy()
-       vote.Round = round
-       return vote
-}
-
-// Convenience: Return new vote with different type
-func withType(vote *Vote, type_ byte) *Vote {
-       vote = vote.Copy()
-       vote.Type = type_
-       return vote
-}
-
-// Convenience: Return new vote with different blockHash
-func withBlockHash(vote *Vote, blockHash []byte) *Vote {
-       vote = vote.Copy()
-       vote.BlockID.Hash = blockHash
-       return vote
-}
-
-// Convenience: Return new vote with different blockParts
-func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote {
-       vote = vote.Copy()
-       vote.BlockID.PartsHeader = blockPartsHeader
-       return vote
-}
-
-func signAddVote(privVal *PrivValidator, vote *Vote, voteSet *VoteSet) (bool, error) {
-       vote.Signature = privVal.Sign(SignBytes(voteSet.ChainID(), vote))
-       added, err := voteSet.AddVote(vote)
-       return added, err
-}
-
-func TestAddVote(t *testing.T) {
-       height, round := 1, 0
-       voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1)
-       val0 := privValidators[0]
-
-       // t.Logf(">> %v", voteSet)
-
-       if voteSet.GetByAddress(val0.Address) != nil {
-               t.Errorf("Expected GetByAddress(val0.Address) to be nil")
-       }
-       if voteSet.BitArray().GetIndex(0) {
-               t.Errorf("Expected BitArray.GetIndex(0) to be false")
-       }
-       blockID, ok := voteSet.TwoThirdsMajority()
-       if ok || !blockID.IsZero() {
-               t.Errorf("There should be no 2/3 majority")
-       }
-
-       vote := &Vote{
-               ValidatorAddress: val0.Address,
-               ValidatorIndex:   0, // since privValidators are in order
-               Height:           height,
-               Round:            round,
-               Type:             VoteTypePrevote,
-               BlockID:          BlockID{nil, PartSetHeader{}},
-       }
-       _, err := signAddVote(val0, vote, voteSet)
-       if err != nil {
-               t.Error(err)
-       }
-
-       if voteSet.GetByAddress(val0.Address) == nil {
-               t.Errorf("Expected GetByAddress(val0.Address) to be present")
-       }
-       if !voteSet.BitArray().GetIndex(0) {
-               t.Errorf("Expected BitArray.GetIndex(0) to be true")
-       }
-       blockID, ok = voteSet.TwoThirdsMajority()
-       if ok || !blockID.IsZero() {
-               t.Errorf("There should be no 2/3 majority")
-       }
-}
-
-func Test2_3Majority(t *testing.T) {
-       height, round := 1, 0
-       voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1)
-
-       voteProto := &Vote{
-               ValidatorAddress: nil, // NOTE: must fill in
-               ValidatorIndex:   -1,  // NOTE: must fill in
-               Height:           height,
-               Round:            round,
-               Type:             VoteTypePrevote,
-               BlockID:          BlockID{nil, PartSetHeader{}},
-       }
-       // 6 out of 10 voted for nil.
-       for i := 0; i < 6; i++ {
-               vote := withValidator(voteProto, privValidators[i].Address, i)
-               signAddVote(privValidators[i], vote, voteSet)
-       }
-       blockID, ok := voteSet.TwoThirdsMajority()
-       if ok || !blockID.IsZero() {
-               t.Errorf("There should be no 2/3 majority")
-       }
-
-       // 7th validator voted for some blockhash
-       {
-               vote := withValidator(voteProto, privValidators[6].Address, 6)
-               signAddVote(privValidators[6], withBlockHash(vote, RandBytes(32)), voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if ok || !blockID.IsZero() {
-                       t.Errorf("There should be no 2/3 majority")
-               }
-       }
-
-       // 8th validator voted for nil.
-       {
-               vote := withValidator(voteProto, privValidators[7].Address, 7)
-               signAddVote(privValidators[7], vote, voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if !ok || !blockID.IsZero() {
-                       t.Errorf("There should be 2/3 majority for nil")
-               }
-       }
-}
-
-func Test2_3MajorityRedux(t *testing.T) {
-       height, round := 1, 0
-       voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 100, 1)
-
-       blockHash := crypto.CRandBytes(32)
-       blockPartsTotal := 123
-       blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)}
-
-       voteProto := &Vote{
-               ValidatorAddress: nil, // NOTE: must fill in
-               ValidatorIndex:   -1,  // NOTE: must fill in
-               Height:           height,
-               Round:            round,
-               Type:             VoteTypePrevote,
-               BlockID:          BlockID{blockHash, blockPartsHeader},
-       }
-
-       // 66 out of 100 voted for nil.
-       for i := 0; i < 66; i++ {
-               vote := withValidator(voteProto, privValidators[i].Address, i)
-               signAddVote(privValidators[i], vote, voteSet)
-       }
-       blockID, ok := voteSet.TwoThirdsMajority()
-       if ok || !blockID.IsZero() {
-               t.Errorf("There should be no 2/3 majority")
-       }
-
-       // 67th validator voted for nil
-       {
-               vote := withValidator(voteProto, privValidators[66].Address, 66)
-               signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if ok || !blockID.IsZero() {
-                       t.Errorf("There should be no 2/3 majority: last vote added was nil")
-               }
-       }
-
-       // 68th validator voted for a different BlockParts PartSetHeader
-       {
-               vote := withValidator(voteProto, privValidators[67].Address, 67)
-               blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)}
-               signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if ok || !blockID.IsZero() {
-                       t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Hash")
-               }
-       }
-
-       // 69th validator voted for different BlockParts Total
-       {
-               vote := withValidator(voteProto, privValidators[68].Address, 68)
-               blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash}
-               signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if ok || !blockID.IsZero() {
-                       t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Total")
-               }
-       }
-
-       // 70th validator voted for different BlockHash
-       {
-               vote := withValidator(voteProto, privValidators[69].Address, 69)
-               signAddVote(privValidators[69], withBlockHash(vote, RandBytes(32)), voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if ok || !blockID.IsZero() {
-                       t.Errorf("There should be no 2/3 majority: last vote added had different BlockHash")
-               }
-       }
-
-       // 71st validator voted for the right BlockHash & BlockPartsHeader
-       {
-               vote := withValidator(voteProto, privValidators[70].Address, 70)
-               signAddVote(privValidators[70], vote, voteSet)
-               blockID, ok = voteSet.TwoThirdsMajority()
-               if !ok || !blockID.Equals(BlockID{blockHash, blockPartsHeader}) {
-                       t.Errorf("There should be 2/3 majority")
-               }
-       }
-}
-
-func TestBadVotes(t *testing.T) {
-       height, round := 1, 0
-       voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1)
-
-       voteProto := &Vote{
-               ValidatorAddress: nil,
-               ValidatorIndex:   -1,
-               Height:           height,
-               Round:            round,
-               Type:             VoteTypePrevote,
-               BlockID:          BlockID{nil, PartSetHeader{}},
-       }
-
-       // val0 votes for nil.
-       {
-               vote := withValidator(voteProto, privValidators[0].Address, 0)
-               added, err := signAddVote(privValidators[0], vote, voteSet)
-               if !added || err != nil {
-                       t.Errorf("Expected VoteSet.Add to succeed")
-               }
-       }
-
-       // val0 votes again for some block.
-       {
-               vote := withValidator(voteProto, privValidators[0].Address, 0)
-               added, err := signAddVote(privValidators[0], withBlockHash(vote, RandBytes(32)), voteSet)
-               if added || err == nil {
-                       t.Errorf("Expected VoteSet.Add to fail, conflicting vote.")
-               }
-       }
-
-       // val1 votes on another height
-       {
-               vote := withValidator(voteProto, privValidators[1].Address, 1)
-               added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet)
-               if added || err == nil {
-                       t.Errorf("Expected VoteSet.Add to fail, wrong height")
-               }
-       }
-
-       // val2 votes on another round
-       {
-               vote := withValidator(voteProto, privValidators[2].Address, 2)
-               added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet)
-               if added || err == nil {
-                       t.Errorf("Expected VoteSet.Add to fail, wrong round")
-               }
-       }
-
-       // val3 votes of another type.
-       {
-               vote := withValidator(voteProto, privValidators[3].Address, 3)
-               added, err := signAddVote(privValidators[3], withType(vote, VoteTypePrecommit), voteSet)
-               if added || err == nil {
-                       t.Errorf("Expected VoteSet.Add to fail, wrong type")
-               }
-       }
-}
-
-func TestConflicts(t *testing.T) {
-       height, round := 1, 0
-       voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 4, 1)
-       blockHash1 := RandBytes(32)
-       blockHash2 := RandBytes(32)
-
-       voteProto := &Vote{
-               ValidatorAddress: nil,
-               ValidatorIndex:   -1,
-               Height:           height,
-               Round:            round,
-               Type:             VoteTypePrevote,
-               BlockID:          BlockID{nil, PartSetHeader{}},
-       }
-
-       // val0 votes for nil.
-       {
-               vote := withValidator(voteProto, privValidators[0].Address, 0)
-               added, err := signAddVote(privValidators[0], vote, voteSet)
-               if !added || err != nil {
-                       t.Errorf("Expected VoteSet.Add to succeed")
-               }
-       }
-
-       // val0 votes again for blockHash1.
-       {
-               vote := withValidator(voteProto, privValidators[0].Address, 0)
-               added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
-               if added {
-                       t.Errorf("Expected VoteSet.Add to fail, conflicting vote.")
-               }
-               if err == nil {
-                       t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
-               }
-       }
-
-       // start tracking blockHash1
-       voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
-
-       // val0 votes again for blockHash1.
-       {
-               vote := withValidator(voteProto, privValidators[0].Address, 0)
-               added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
-               if !added {
-                       t.Errorf("Expected VoteSet.Add to succeed, called SetPeerMaj23().")
-               }
-               if err == nil {
-                       t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
-               }
-       }
-
-       // attempt tracking blockHash2, should fail because already set for peerA.
-       voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
-
-       // val0 votes again for blockHash1.
-       {
-               vote := withValidator(voteProto, privValidators[0].Address, 0)
-               added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet)
-               if added {
-                       t.Errorf("Expected VoteSet.Add to fail, duplicate SetPeerMaj23() from peerA")
-               }
-               if err == nil {
-                       t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
-               }
-       }
-
-       // val1 votes for blockHash1.
-       {
-               vote := withValidator(voteProto, privValidators[1].Address, 1)
-               added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet)
-               if !added || err != nil {
-                       t.Errorf("Expected VoteSet.Add to succeed")
-               }
-       }
-
-       // check
-       if voteSet.HasTwoThirdsMajority() {
-               t.Errorf("We shouldn't have 2/3 majority yet")
-       }
-       if voteSet.HasTwoThirdsAny() {
-               t.Errorf("We shouldn't have 2/3 if any votes yet")
-       }
-
-       // val2 votes for blockHash2.
-       {
-               vote := withValidator(voteProto, privValidators[2].Address, 2)
-               added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet)
-               if !added || err != nil {
-                       t.Errorf("Expected VoteSet.Add to succeed")
-               }
-       }
-
-       // check
-       if voteSet.HasTwoThirdsMajority() {
-               t.Errorf("We shouldn't have 2/3 majority yet")
-       }
-       if !voteSet.HasTwoThirdsAny() {
-               t.Errorf("We should have 2/3 if any votes")
-       }
-
-       // now attempt tracking blockHash1
-       voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
-
-       // val2 votes for blockHash1.
-       {
-               vote := withValidator(voteProto, privValidators[2].Address, 2)
-               added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet)
-               if !added {
-                       t.Errorf("Expected VoteSet.Add to succeed")
-               }
-               if err == nil {
-                       t.Errorf("Expected VoteSet.Add to return error, conflicting vote")
-               }
-       }
-
-       // check
-       if !voteSet.HasTwoThirdsMajority() {
-               t.Errorf("We should have 2/3 majority for blockHash1")
-       }
-       blockIDMaj23, _ := voteSet.TwoThirdsMajority()
-       if !bytes.Equal(blockIDMaj23.Hash, blockHash1) {
-               t.Errorf("Got the wrong 2/3 majority blockhash")
-       }
-       if !voteSet.HasTwoThirdsAny() {
-               t.Errorf("We should have 2/3 if any votes")
-       }
-
-}
-
-func TestMakeCommit(t *testing.T) {
-       height, round := 1, 0
-       voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrecommit, 10, 1)
-       blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}
-
-       voteProto := &Vote{
-               ValidatorAddress: nil,
-               ValidatorIndex:   -1,
-               Height:           height,
-               Round:            round,
-               Type:             VoteTypePrecommit,
-               BlockID:          BlockID{blockHash, blockPartsHeader},
-       }
-
-       // 6 out of 10 voted for some block.
-       for i := 0; i < 6; i++ {
-               vote := withValidator(voteProto, privValidators[i].Address, i)
-               signAddVote(privValidators[i], vote, voteSet)
-       }
-
-       // MakeCommit should fail.
-       AssertPanics(t, "Doesn't have +2/3 majority", func() { voteSet.MakeCommit() })
-
-       // 7th voted for some other block.
-       {
-               vote := withValidator(voteProto, privValidators[6].Address, 6)
-               vote = withBlockHash(vote, RandBytes(32))
-               vote = withBlockPartsHeader(vote, PartSetHeader{123, RandBytes(32)})
-               signAddVote(privValidators[6], vote, voteSet)
-       }
-
-       // The 8th voted like everyone else.
-       {
-               vote := withValidator(voteProto, privValidators[7].Address, 7)
-               signAddVote(privValidators[7], vote, voteSet)
-       }
-
-       commit := voteSet.MakeCommit()
-
-       // Commit should have 10 elements
-       if len(commit.Precommits) != 10 {
-               t.Errorf("Commit Precommits should have the same number of precommits as validators")
-       }
-
-       // Ensure that Commit precommits are ordered.
-       if err := commit.ValidateBasic(); err != nil {
-               t.Errorf("Error in Commit.ValidateBasic(): %v", err)
-       }
-
-}
diff --git a/types/vote_test.go b/types/vote_test.go
deleted file mode 100644 (file)
index 353acfa..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-package types
-
-import (
-       "testing"
-)
-
-func TestVoteSignable(t *testing.T) {
-       vote := &Vote{
-               ValidatorAddress: []byte("addr"),
-               ValidatorIndex:   56789,
-               Height:           12345,
-               Round:            23456,
-               Type:             byte(2),
-               BlockID: BlockID{
-                       Hash: []byte("hash"),
-                       PartsHeader: PartSetHeader{
-                               Total: 1000000,
-                               Hash:  []byte("parts_hash"),
-                       },
-               },
-       }
-       signBytes := SignBytes("test_chain_id", vote)
-       signStr := string(signBytes)
-
-       expected := `{"chain_id":"test_chain_id","vote":{"block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":1000000}},"height":12345,"round":23456,"type":2}}`
-       if signStr != expected {
-               // NOTE: when this fails, you probably want to fix up consensus/replay_test too
-               t.Errorf("Got unexpected sign string for Vote. Expected:\n%v\nGot:\n%v", expected, signStr)
-       }
-}