OSDN Git Service

Added blockchain struct.
author gguoss <1536310027@qq.com>
Fri, 14 Jul 2017 05:57:40 +0000 (13:57 +0800)
committer gguoss <1536310027@qq.com>
Fri, 14 Jul 2017 05:57:40 +0000 (13:57 +0800)
139 files changed:
README.md
cmd/blockchain/commands/flags/log_level.go [new file with mode: 0644]
cmd/blockchain/commands/init.go [new file with mode: 0644]
cmd/blockchain/commands/root.go [new file with mode: 0644]
cmd/blockchain/commands/run_node.go [new file with mode: 0644]
cmd/blockchain/commands/version.go [new file with mode: 0644]
cmd/blockchain/main.go [new file with mode: 0644]
database/pg/errors.go [new file with mode: 0644]
database/pg/listen.go [new file with mode: 0644]
database/pg/pg.go [new file with mode: 0644]
database/pg/pg_test.go [new file with mode: 0644]
database/pg/pgtest/ctx_test.go [new file with mode: 0644]
database/pg/pgtest/doc.go [new file with mode: 0644]
database/pg/pgtest/dump.go [new file with mode: 0644]
database/pg/pgtest/pgtest.go [new file with mode: 0644]
database/pg/pgtest/wrap.go [new file with mode: 0644]
database/pg/query.go [new file with mode: 0644]
database/sinkdb/internal/sinkpb/gen.go [new file with mode: 0644]
database/sinkdb/internal/sinkpb/op.pb.go [new file with mode: 0644]
database/sinkdb/internal/sinkpb/op.proto [new file with mode: 0644]
database/sinkdb/internal/sinkpb/snapshot.pb.go [new file with mode: 0644]
database/sinkdb/internal/sinkpb/snapshot.proto [new file with mode: 0644]
database/sinkdb/op.go [new file with mode: 0644]
database/sinkdb/sinkdb.go [new file with mode: 0644]
database/sinkdb/sinkdb_test.go [new file with mode: 0644]
database/sinkdb/sinkdbtest/sinkdbtest.go [new file with mode: 0644]
database/sinkdb/state.go [new file with mode: 0644]
database/sinkdb/state_test.go [new file with mode: 0644]
database/sinkdb/version.go [new file with mode: 0644]
database/sqlutil/doc.go [new file with mode: 0644]
database/sqlutil/logdriver.go [new file with mode: 0644]
encoding/blockchain/blockchain.go [new file with mode: 0644]
encoding/blockchain/blockchain_test.go [new file with mode: 0644]
encoding/bufpool/bufpool.go [new file with mode: 0644]
encoding/json/duration.go [new file with mode: 0644]
encoding/json/duration_test.go [new file with mode: 0644]
encoding/json/json.go [new file with mode: 0644]
errors/doc.go [new file with mode: 0644]
errors/errors.go [new file with mode: 0644]
errors/errors_test.go [new file with mode: 0644]
errors/example_test.go [new file with mode: 0644]
errors/stack.go [new file with mode: 0644]
errors/writer.go [new file with mode: 0644]
errors/writer_test.go [new file with mode: 0644]
protocol/bc/asset.go [new file with mode: 0644]
protocol/bc/asset_test.go [new file with mode: 0644]
protocol/bc/bc.pb.go [new file with mode: 0644]
protocol/bc/bc.proto [new file with mode: 0644]
protocol/bc/bctest/tx.go [new file with mode: 0644]
protocol/bc/block.go [new file with mode: 0644]
protocol/bc/blockheader.go [new file with mode: 0644]
protocol/bc/doc.go [new file with mode: 0644]
protocol/bc/entry.go [new file with mode: 0644]
protocol/bc/entry_test.go [new file with mode: 0644]
protocol/bc/gen.go [new file with mode: 0644]
protocol/bc/hash.go [new file with mode: 0644]
protocol/bc/issuance.go [new file with mode: 0644]
protocol/bc/legacy/bc_test.go [new file with mode: 0644]
protocol/bc/legacy/block.go [new file with mode: 0644]
protocol/bc/legacy/block_commitment.go [new file with mode: 0644]
protocol/bc/legacy/block_header.go [new file with mode: 0644]
protocol/bc/legacy/block_test.go [new file with mode: 0644]
protocol/bc/legacy/block_witness.go [new file with mode: 0644]
protocol/bc/legacy/fuzz_test.go [new file with mode: 0644]
protocol/bc/legacy/issuance.go [new file with mode: 0644]
protocol/bc/legacy/issuance_witness.go [new file with mode: 0644]
protocol/bc/legacy/map.go [new file with mode: 0644]
protocol/bc/legacy/map_test.go [new file with mode: 0644]
protocol/bc/legacy/output_commitment.go [new file with mode: 0644]
protocol/bc/legacy/spend.go [new file with mode: 0644]
protocol/bc/legacy/transaction.go [new file with mode: 0644]
protocol/bc/legacy/transaction_test.go [new file with mode: 0644]
protocol/bc/legacy/tx_test.go [new file with mode: 0644]
protocol/bc/legacy/txinput.go [new file with mode: 0644]
protocol/bc/legacy/txoutput.go [new file with mode: 0644]
protocol/bc/merkle.go [new file with mode: 0644]
protocol/bc/merkle_test.go [new file with mode: 0644]
protocol/bc/mux.go [new file with mode: 0644]
protocol/bc/nonce.go [new file with mode: 0644]
protocol/bc/output.go [new file with mode: 0644]
protocol/bc/retirement.go [new file with mode: 0644]
protocol/bc/spend.go [new file with mode: 0644]
protocol/bc/time.go [new file with mode: 0644]
protocol/bc/timerange.go [new file with mode: 0644]
protocol/bc/translation.md [new file with mode: 0644]
protocol/bc/tx.go [new file with mode: 0644]
protocol/bc/txheader.go [new file with mode: 0644]
protocol/block.go [new file with mode: 0644]
protocol/block_test.go [new file with mode: 0644]
protocol/patricia/patricia.go [new file with mode: 0644]
protocol/patricia/patricia_test.go [new file with mode: 0644]
protocol/protocol.go [new file with mode: 0644]
protocol/prottest/block.go [new file with mode: 0644]
protocol/prottest/block_test.go [new file with mode: 0644]
protocol/prottest/doc.go [new file with mode: 0644]
protocol/prottest/memstore/memstore.go [new file with mode: 0644]
protocol/recover.go [new file with mode: 0644]
protocol/recover_test.go [new file with mode: 0644]
protocol/state/snapshot.go [new file with mode: 0644]
protocol/state/snapshot_test.go [new file with mode: 0644]
protocol/tx.go [new file with mode: 0644]
protocol/tx_test.go [new file with mode: 0644]
protocol/validation/block_test.go [new file with mode: 0644]
protocol/validation/fuzz_test.go [new file with mode: 0644]
protocol/validation/validation.go [new file with mode: 0644]
protocol/validation/validation_test.go [new file with mode: 0644]
protocol/validation/vmcontext.go [new file with mode: 0644]
protocol/validation/vmcontext_test.go [new file with mode: 0644]
protocol/vm/assemble.go [new file with mode: 0644]
protocol/vm/assemble_test.go [new file with mode: 0644]
protocol/vm/bitwise.go [new file with mode: 0644]
protocol/vm/bitwise_test.go [new file with mode: 0644]
protocol/vm/context.go [new file with mode: 0644]
protocol/vm/control.go [new file with mode: 0644]
protocol/vm/control_test.go [new file with mode: 0644]
protocol/vm/crypto.go [new file with mode: 0644]
protocol/vm/crypto_test.go [new file with mode: 0644]
protocol/vm/doc.go [new file with mode: 0644]
protocol/vm/errors.go [new file with mode: 0644]
protocol/vm/introspection.go [new file with mode: 0644]
protocol/vm/introspection_test.go [new file with mode: 0644]
protocol/vm/numeric.go [new file with mode: 0644]
protocol/vm/numeric_test.go [new file with mode: 0644]
protocol/vm/ops.go [new file with mode: 0644]
protocol/vm/ops_test.go [new file with mode: 0644]
protocol/vm/pushdata.go [new file with mode: 0644]
protocol/vm/pushdata_test.go [new file with mode: 0644]
protocol/vm/splice.go [new file with mode: 0644]
protocol/vm/splice_test.go [new file with mode: 0644]
protocol/vm/stack.go [new file with mode: 0644]
protocol/vm/stack_test.go [new file with mode: 0644]
protocol/vm/types.go [new file with mode: 0644]
protocol/vm/types_test.go [new file with mode: 0644]
protocol/vm/vm.go [new file with mode: 0644]
protocol/vm/vm_test.go [new file with mode: 0644]
protocol/vm/vmutil/builder.go [new file with mode: 0644]
protocol/vm/vmutil/builder_test.go [new file with mode: 0644]
protocol/vm/vmutil/script.go [new file with mode: 0644]
protocol/vm/vmutil/script_test.go [new file with mode: 0644]

index 08f8c94..1e988d6 100644 (file)
--- a/README.md
+++ b/README.md
@@ -1 +1,2 @@
-# BlockChain
\ No newline at end of file
+# BlockChain.
+
diff --git a/cmd/blockchain/commands/flags/log_level.go b/cmd/blockchain/commands/flags/log_level.go
new file mode 100644 (file)
index 0000000..f9451ff
--- /dev/null
@@ -0,0 +1,85 @@
+package flags
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/pkg/errors"
+
+       cfg "github.com/node_p2p/config"
+       "github.com/tendermint/tmlibs/log"
+)
+
+const (
+       defaultLogLevelKey = "*"
+)
+
+// ParseLogLevel parses complex log level - comma-separated
+// list of module:level pairs with an optional *:level pair (* means
+// all other modules).
+//
+func ParseLogLevel(lvl string, logger log.Logger) (log.Logger, error) {
+       if lvl == "" {
+               return nil, errors.New("Empty log level")
+       }
+
+       l := lvl
+
+       // prefix simple one word levels (e.g. "info") with "*"
+       if !strings.Contains(l, ":") {
+               l = defaultLogLevelKey + ":" + l
+       }
+
+       options := make([]log.Option, 0)
+
+       isDefaultLogLevelSet := false
+       var option log.Option
+       var err error
+
+       list := strings.Split(l, ",")
+       for _, item := range list {
+               moduleAndLevel := strings.Split(item, ":")
+
+               if len(moduleAndLevel) != 2 {
+                       return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list)
+               }
+
+               module := moduleAndLevel[0]
+               level := moduleAndLevel[1]
+
+               if module == defaultLogLevelKey {
+                       option, err = log.AllowLevel(level)
+                       if err != nil {
+                               return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l))
+                       }
+                       options = append(options, option)
+                       isDefaultLogLevelSet = true
+               } else {
+                       switch level {
+                       case "debug":
+                               option = log.AllowDebugWith("module", module)
+                       case "info":
+                               option = log.AllowInfoWith("module", module)
+                       case "error":
+                               option = log.AllowErrorWith("module", module)
+                       case "none":
+                               option = log.AllowNoneWith("module", module)
+                       default:
+                               return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list)
+                       }
+                       options = append(options, option)
+
+               }
+       }
+
+       // if "*" is not provided, set default global level
+       if !isDefaultLogLevelSet {
+               option, err = log.AllowLevel(cfg.DefaultLogLevel())
+               if err != nil {
+                       return nil, err
+               }
+               options = append(options, option)
+       }
+
+       return log.NewFilter(logger, options...), nil
+}
diff --git a/cmd/blockchain/commands/init.go b/cmd/blockchain/commands/init.go
new file mode 100644 (file)
index 0000000..7ec7ac9
--- /dev/null
@@ -0,0 +1,47 @@
+package commands
+
+import (
+       "os"
+
+       "github.com/spf13/cobra"
+
+       "github.com/node_p2p/types"
+       cmn "github.com/tendermint/tmlibs/common"
+)
+
+var initFilesCmd = &cobra.Command{
+       Use:   "init",
+       Short: "Initialize node_p2p",
+       Run:   initFiles,
+}
+
+func init() {
+       RootCmd.AddCommand(initFilesCmd)
+}
+
+func initFiles(cmd *cobra.Command, args []string) {
+       privValFile := config.PrivValidatorFile()
+       if _, err := os.Stat(privValFile); os.IsNotExist(err) {
+               privValidator := types.GenPrivValidator()
+               privValidator.SetFile(privValFile)
+               privValidator.Save()
+
+               genFile := config.GenesisFile()
+
+               if _, err := os.Stat(genFile); os.IsNotExist(err) {
+                       genDoc := types.GenesisDoc{
+                               ChainID: cmn.Fmt("chain0"),
+                       }
+                       genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
+                               PubKey: privValidator.PubKey,
+                               Amount: 10,
+                       }}
+
+                       genDoc.SaveAs(genFile)
+               }
+
+               logger.Info("Initialized tendermint", "genesis", config.GenesisFile(), "priv_validator", config.PrivValidatorFile())
+       } else {
+               logger.Info("Already initialized", "priv_validator", config.PrivValidatorFile())
+       }
+}
diff --git a/cmd/blockchain/commands/root.go b/cmd/blockchain/commands/root.go
new file mode 100644 (file)
index 0000000..5fcd4fe
--- /dev/null
@@ -0,0 +1,39 @@
+package commands
+
+import (
+       "os"
+
+       "github.com/spf13/cobra"
+       "github.com/spf13/viper"
+
+       tmflags "github.com/node_p2p/node_p2p/commands/flags"
+       cfg "github.com/node_p2p/config"
+       "github.com/tendermint/tmlibs/log"
+)
+
+var (
+       config = cfg.DefaultConfig()
+       logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "main")
+)
+
+func init() {
+       RootCmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
+}
+
+var RootCmd = &cobra.Command{
+       Use:   "node_p2p",
+       Short: "node_p2p in Go",
+       PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+               err := viper.Unmarshal(config)
+               if err != nil {
+                       return err
+               }
+               config.SetRoot(config.RootDir)
+               cfg.EnsureRoot(config.RootDir)
+               logger, err = tmflags.ParseLogLevel(config.LogLevel, logger)
+               if err != nil {
+                       return err
+               }
+               return nil
+       },
+}
diff --git a/cmd/blockchain/commands/run_node.go b/cmd/blockchain/commands/run_node.go
new file mode 100644 (file)
index 0000000..0103603
--- /dev/null
@@ -0,0 +1,72 @@
+package commands
+
+import (
+       "fmt"
+       "io/ioutil"
+       "time"
+
+       "github.com/spf13/cobra"
+
+       "github.com/node_p2p/node"
+       "github.com/node_p2p/types"
+       cmn "github.com/tendermint/tmlibs/common"
+)
+
+var runNodeCmd = &cobra.Command{
+       Use:   "node_p2p",
+       Short: "Run the p2p node",
+       RunE:  runNode,
+}
+
+func init() {
+       // p2p flags
+       runNodeCmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
+       runNodeCmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
+       runNodeCmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
+       runNodeCmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable Peer-Exchange (dev feature)")
+
+       RootCmd.AddCommand(runNodeCmd)
+}
+
+func runNode(cmd *cobra.Command, args []string) error {
+
+       // Wait until the genesis doc becomes available
+       // This is for Mintnet compatibility.
+       // TODO: If Mintnet gets deprecated or genesis_file is
+       // always available, remove.
+       genDocFile := config.GenesisFile()
+       if !cmn.FileExists(genDocFile) {
+               logger.Info(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
+               for {
+                       time.Sleep(time.Second)
+                       if !cmn.FileExists(genDocFile) {
+                               continue
+                       }
+                       jsonBlob, err := ioutil.ReadFile(genDocFile)
+                       if err != nil {
+                               return fmt.Errorf("Couldn't read GenesisDoc file: %v", err)
+                       }
+                       genDoc, err := types.GenesisDocFromJSON(jsonBlob)
+                       if err != nil {
+                               return fmt.Errorf("Error reading GenesisDoc: %v", err)
+                       }
+                       if genDoc.ChainID == "" {
+                               return fmt.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
+                       }
+                       config.ChainID = genDoc.ChainID
+               }
+       }
+
+       // Create & start node
+       n := node.NewNodeDefault(config, logger.With("module", "node_p2p"))
+       if _, err := n.Start(); err != nil {
+               return fmt.Errorf("Failed to start node: %v", err)
+       } else {
+               logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
+       }
+
+       // Trap signal, run forever.
+       n.RunForever()
+
+       return nil
+}
diff --git a/cmd/blockchain/commands/version.go b/cmd/blockchain/commands/version.go
new file mode 100644 (file)
index 0000000..f7b40f8
--- /dev/null
@@ -0,0 +1,21 @@
+package commands
+
+import (
+       "fmt"
+
+       "github.com/spf13/cobra"
+
+       "github.com/node_p2p/version"
+)
+
+var versionCmd = &cobra.Command{
+       Use:   "version",
+       Short: "Show version info",
+       Run: func(cmd *cobra.Command, args []string) {
+               fmt.Println(version.Version)
+       },
+}
+
+func init() {
+       RootCmd.AddCommand(versionCmd)
+}
diff --git a/cmd/blockchain/main.go b/cmd/blockchain/main.go
new file mode 100644 (file)
index 0000000..cb68520
--- /dev/null
@@ -0,0 +1,13 @@
+package main
+
+import (
+       "os"
+
+       "github.com/node_p2p/node_p2p/commands"
+       "github.com/tendermint/tmlibs/cli"
+)
+
+func main() {
+       cmd := cli.PrepareBaseCmd(commands.RootCmd, "TM", os.ExpandEnv("./.node"))
+       cmd.Execute()
+}
diff --git a/database/pg/errors.go b/database/pg/errors.go
new file mode 100644 (file)
index 0000000..ec7460a
--- /dev/null
@@ -0,0 +1,14 @@
+package pg
+
+import "errors"
+
+// ErrUserInputNotFound indicates that a query returned no results.
+// It is equivalent to sql.ErrNoRows, except that ErrUserInputNotFound
+// also indicates the query was based on user-provided parameters,
+// and the lack of results should be communicated back to the user.
+//
+// In contrast, we use sql.ErrNoRows to represent an internal error;
+// this indicates a bug in our code
+// and only a generic "internal error" message
+// should be communicated back to the user.
+var ErrUserInputNotFound = errors.New("pg: user input not found")
diff --git a/database/pg/listen.go b/database/pg/listen.go
new file mode 100644 (file)
index 0000000..8d12505
--- /dev/null
@@ -0,0 +1,25 @@
+package pg
+
+import (
+       "context"
+       "time"
+
+       "github.com/lib/pq"
+
+       "chain/errors"
+       "chain/log"
+       "chain/net"
+)
+
+// NewListener creates a new pq.Listener and begins listening.
+func NewListener(ctx context.Context, dbURL, channel string) (*pq.Listener, error) {
+       // We want etcd name lookups so we use our own Dialer.
+       d := new(net.Dialer)
+       result := pq.NewDialListener(d, dbURL, 1*time.Second, 10*time.Second, func(ev pq.ListenerEventType, err error) {
+               if err != nil {
+                       log.Error(ctx, errors.Wrapf(err, "event in %s listener: %v", channel, ev))
+               }
+       })
+       err := result.Listen(channel)
+       return result, errors.Wrap(err, "listening to channel")
+}
diff --git a/database/pg/pg.go b/database/pg/pg.go
new file mode 100644 (file)
index 0000000..2b33fd4
--- /dev/null
@@ -0,0 +1,129 @@
+// Package pg provides small utilities for the lib/pq
+// database driver.
+//
+// It also registers the sql.Driver "hapg", which can
+// resolve uris from the high-availability postgres package.
+package pg
+
+import (
+       "context"
+       "database/sql"
+       "database/sql/driver"
+       "encoding/json"
+       "fmt"
+       "net"
+       "net/url"
+       "strings"
+       "unicode/utf8"
+
+       "github.com/lib/pq"
+
+       chainnet "chain/net"
+)
+
+// DB holds methods common to the DB, Tx, and Stmt types
+// in package sql.
+type DB interface {
+       QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
+       QueryRowContext(context.Context, string, ...interface{}) *sql.Row
+       ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+}
+
+// TODO: move this under chain/hapg
+type hapgDriver struct{}
+
+func NewDriver() driver.Driver {
+       return hapgDriver{}
+}
+
+func (d hapgDriver) Open(name string) (driver.Conn, error) {
+       name, err := resolveURI(name)
+       if err != nil {
+               return nil, err
+       }
+
+       return pq.Open(name)
+}
+
+func init() {
+       sql.Register("hapg", hapgDriver{})
+}
+
+// IsUniqueViolation returns true if the given error is a Postgres unique
+// constraint violation error.
+func IsUniqueViolation(err error) bool {
+       pqErr, ok := err.(*pq.Error)
+       return ok && pqErr.Code.Name() == "unique_violation"
+}
+
// IsValidJSONB returns true if the provided bytes may be stored
// in a Postgres JSONB data type. It validates that b is valid
// utf-8 and valid json. It also verifies that it does not include
// the \u0000 escape sequence, unsupported by the jsonb data type:
// https://www.postgresql.org/message-id/E1YHHV8-00032A-Em@gemulon.postgresql.org
func IsValidJSONB(b []byte) bool {
	var decoded interface{}
	if json.Unmarshal(b, &decoded) != nil {
		return false
	}
	return utf8.Valid(b) && !containsNullByte(decoded)
}

// containsNullByte reports whether the decoded JSON value v — or any
// string value or object key nested inside it — contains the NUL rune
// (\u0000). It panics on any type json.Unmarshal never produces.
func containsNullByte(v interface{}) (found bool) {
	const nullByte = '\u0000'
	switch t := v.(type) {
	case nil, bool, float64:
		// scalars other than strings can never hold a NUL
		return false
	case string:
		return strings.ContainsRune(t, nullByte)
	case []interface{}:
		for _, elem := range t {
			if containsNullByte(elem) {
				return true
			}
		}
		return false
	case map[string]interface{}:
		for key, val := range t {
			if containsNullByte(key) || containsNullByte(val) {
				return true
			}
		}
		return false
	default:
		panic(fmt.Errorf("unknown json type %T", v))
	}
}
+
+func resolveURI(rawURI string) (string, error) {
+       u, err := url.Parse(rawURI)
+       if err != nil {
+               return "", err
+       }
+
+       if u.Host == "" {
+               // postgres specifies localhost with the empty string
+               return rawURI, nil
+       }
+
+       host, port, err := net.SplitHostPort(u.Host)
+       if err != nil {
+               // If there's an error, it might be because there's no
+               // port on uri.Host, which is totally fine. If there's
+               // another problem, it will get caught later anyway, so
+               // carry on!
+               host = u.Host
+       }
+
+       addrs, err := chainnet.LookupHost(host)
+       if err != nil {
+               return "", err
+       }
+
+       addr := addrs[0] // there should only be one address
+
+       if port != "" {
+               addr = net.JoinHostPort(addr, port)
+       }
+
+       u.Host = addr
+       return u.String(), nil
+}
diff --git a/database/pg/pg_test.go b/database/pg/pg_test.go
new file mode 100644 (file)
index 0000000..87b5e75
--- /dev/null
@@ -0,0 +1,55 @@
+package pg
+
+import (
+       "net"
+       "testing"
+)
+
+func TestResolveURI(t *testing.T) {
+       addrs, err := net.LookupHost("example.com")
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       cases := []struct {
+               input string
+               want  string
+       }{
+               {"postgres:///foo", "postgres:///foo"},
+               {"postgres://example.com/foo", "postgres://" + addrs[0] + "/foo"},
+       }
+
+       for _, c := range cases {
+               res, err := resolveURI(c.input)
+               if err != nil {
+                       t.Fatalf("unexpected error %v", err)
+               }
+
+               if res != c.want {
+                       t.Fatalf("resolveURI(%q) = %q, want %q", c.input, res, c.want)
+               }
+
+       }
+
+}
+
+func TestIsValidJSONB(t *testing.T) {
+       cases := map[string]bool{
+               `"hello"`: true,
+               `{`:       false,
+               `{"foo": ["bar", "baz"]}`:                    true,
+               `{"bad": {"foo": "bar\u0000"}}`:              false,
+               `{"bad": {"foo\u0000": "bar"}}`:              false,
+               `{"bad": "\u0000"}`:                          false,
+               `["hello", "world", "what is \u0000p?"]`:     false,
+               `"` + string([]byte{0xff, 0xfe, 0xfd}) + `"`: false,
+       }
+
+       for b, want := range cases {
+               t.Run(b, func(t *testing.T) {
+                       if got := IsValidJSONB([]byte(b)); got != want {
+                               t.Errorf("got %t want %t", got, want)
+                       }
+               })
+       }
+}
diff --git a/database/pg/pgtest/ctx_test.go b/database/pg/pgtest/ctx_test.go
new file mode 100644 (file)
index 0000000..a0b6010
--- /dev/null
@@ -0,0 +1,26 @@
+package pgtest
+
+import (
+       "context"
+       "testing"
+       "time"
+
+       "chain/database/pg"
+       "chain/errors"
+)
+
+func TestContextTimeout(t *testing.T) {
+       ctx := context.Background()
+       _, db := NewDB(t, SchemaPath)
+
+       ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
+       defer cancel()
+
+       var err error
+       for err == nil {
+               err = pg.ForQueryRows(ctx, db, "SELECT 1", func(i int) {})
+       }
+       if errors.Root(err) != context.DeadlineExceeded {
+               t.Fatalf("Got %s, want %s", err, context.DeadlineExceeded)
+       }
+}
diff --git a/database/pg/pgtest/doc.go b/database/pg/pgtest/doc.go
new file mode 100644 (file)
index 0000000..307218c
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+
+Package pgtest provides support functions for tests that need to
+use Postgres. Most clients will just call NewTx;
+those that need more control can call NewDB.
+
+    func TestSimple(t *testing.T) {
+        dbtx := pgtest.NewTx(t)
+        ...
+    }
+
+    func TestComplex(t *testing.T) {
+        _, db := pgtest.NewDB(t, pgtest.SchemaPath)
+        ...
+        dbtx, err := db.Begin(ctx)
+        ...
+    }
+
+Prefer NewTx when the caller (usually a test function)
+can run in exactly one transaction.
+It's significantly faster than NewDB.
+
+*/
+package pgtest
diff --git a/database/pg/pgtest/dump.go b/database/pg/pgtest/dump.go
new file mode 100644 (file)
index 0000000..39daedf
--- /dev/null
@@ -0,0 +1,34 @@
+package pgtest
+
+import (
+       "net/url"
+       "os/exec"
+       "strings"
+       "testing"
+)
+
+// Dump performs a full pg_dump of the data in the database at the
+// provided URL.
+func Dump(t testing.TB, dbURL string, includeSchema bool, excludingTables ...string) string {
+       u, err := url.Parse(dbURL)
+       if err != nil {
+               t.Fatal(err)
+       }
+       name := strings.TrimLeft(u.Path, "/")
+
+       args := []string{"--no-owner", "--no-privileges", "--inserts"}
+       if !includeSchema {
+               args = append(args, "--data-only")
+       }
+       for _, tbl := range excludingTables {
+               args = append(args, "--exclude-table="+tbl)
+       }
+       args = append(args, name)
+
+       cmd := exec.Command("pg_dump", args...)
+       out, err := cmd.Output()
+       if err != nil {
+               t.Fatal(err)
+       }
+       return string(out)
+}
diff --git a/database/pg/pgtest/pgtest.go b/database/pg/pgtest/pgtest.go
new file mode 100644 (file)
index 0000000..f6d677e
--- /dev/null
@@ -0,0 +1,262 @@
+package pgtest
+
+import (
+       "context"
+       "database/sql"
+       "io/ioutil"
+       "log"
+       "math/rand"
+       "net/url"
+       "os"
+       "runtime"
+       "testing"
+       "time"
+
+       "github.com/lib/pq"
+
+       "chain/database/pg"
+       "chain/testutil"
+)
+
+var (
+       random = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+       // dbpool contains initialized, pristine databases,
+       // as returned from open. It is the client's job to
+       // make sure a database is in this state
+       // (for example, by rolling back a transaction)
+       // before returning it to the pool.
+       dbpool = make(chan *sql.DB, 4)
+)
+
+// DefaultURL is used by NewTx and NewDB if DBURL is the empty string.
+const DefaultURL = "postgres:///postgres?sslmode=disable"
+
+var (
+       // DBURL should be a URL of the form "postgres://...".
+       // If it is the empty string, DefaultURL will be used.
+       // The functions NewTx and NewDB use it to create and connect
+       // to new databases by replacing the database name component
+       // with a randomized name.
+       DBURL = os.Getenv("DB_URL_TEST")
+
+       // SchemaPath is a file containing a schema to initialize
+       // a database in NewTx.
+       SchemaPath = os.Getenv("CHAIN") + "/core/schema.sql"
+)
+
+const (
+       gcDur      = 3 * time.Minute
+       timeFormat = "20060102150405"
+)
+
+// NewDB creates a database initialized
+// with the schema in schemaPath.
+// It returns the resulting *sql.DB with its URL.
+//
+// It also registers a finalizer for the DB, so callers
+// can discard it without closing it explicitly, and the
+// test program is nevertheless unlikely to run out of
+// connection slots in the server.
+//
+// Prefer NewTx whenever the caller can do its
+// work in exactly one transaction.
+func NewDB(f Fataler, schemaPath string) (url string, db *sql.DB) {
+       ctx := context.Background()
+       if os.Getenv("CHAIN") == "" {
+               log.Println("warning: $CHAIN not set; probably can't find schema")
+       }
+       url, db, err := open(ctx, DBURL, schemaPath)
+       if err != nil {
+               f.Fatal(err)
+       }
+       runtime.SetFinalizer(db, (*sql.DB).Close)
+       return url, db
+}
+
+// NewTx returns a new transaction on a database
+// initialized with the schema in SchemaPath.
+//
+// It also registers a finalizer for the Tx, so callers
+// can discard it without rolling back explicitly, and the
+// test program is nevertheless unlikely to run out of
+// connection slots in the server.
+// The caller should not commit the returned Tx; doing so
+// will prevent the underlying database from being reused
+// and so cause future calls to NewTx to be slower.
+func NewTx(f Fataler) *sql.Tx {
+       runtime.GC() // give the finalizers a better chance to run
+       ctx := context.Background()
+       if os.Getenv("CHAIN") == "" {
+               log.Println("warning: $CHAIN not set; probably can't find schema")
+       }
+       db, err := getdb(ctx, DBURL, SchemaPath)
+       if err != nil {
+               f.Fatal(err)
+       }
+       tx, err := db.BeginTx(ctx, nil)
+       if err != nil {
+               db.Close()
+               f.Fatal(err)
+       }
+       // NOTE(kr): we do not set a finalizer on the DB.
+       // It is closed explicitly, if necessary, by finalizeTx.
+       runtime.SetFinalizer(tx, finaldb{db}.finalizeTx)
+       return tx
+}
+
+// CloneDB creates a new database, using the database at the provided
+// URL as a template. It returns the URL of the database clone.
+func CloneDB(ctx context.Context, baseURL string) (newURL string, err error) {
+       u, err := url.Parse(baseURL)
+       if err != nil {
+               return "", err
+       }
+
+       ctldb, err := sql.Open("postgres", baseURL)
+       if err != nil {
+               return "", err
+       }
+       defer ctldb.Close()
+
+       dbname := pickName("db")
+       _, err = ctldb.Exec("CREATE DATABASE " + pq.QuoteIdentifier(dbname) + " WITH TEMPLATE " + pq.QuoteIdentifier(u.Path[1:]))
+       if err != nil {
+               return "", err
+       }
+       u.Path = "/" + dbname
+       return u.String(), nil
+}
+
+// open derives a new randomized test database name from baseURL,
+// initializes it with schemaFile, and opens it.
+func open(ctx context.Context, baseURL, schemaFile string) (newurl string, db *sql.DB, err error) {
+       if baseURL == "" {
+               baseURL = DefaultURL
+       }
+
+       u, err := url.Parse(baseURL)
+       if err != nil {
+               return "", nil, err
+       }
+
+       ctldb, err := sql.Open("postgres", baseURL)
+       if err != nil {
+               return "", nil, err
+       }
+       defer ctldb.Close()
+
+       err = gcdbs(ctldb)
+       if err != nil {
+               log.Println(err)
+       }
+
+       dbname := pickName("db")
+       u.Path = "/" + dbname
+       _, err = ctldb.Exec("CREATE DATABASE " + pq.QuoteIdentifier(dbname))
+       if err != nil {
+               return "", nil, err
+       }
+
+       schema, err := ioutil.ReadFile(schemaFile)
+       if err != nil {
+               return "", nil, err
+       }
+       db, err = sql.Open("postgres", u.String())
+       if err != nil {
+               return "", nil, err
+       }
+       _, err = db.ExecContext(ctx, string(schema))
+       if err != nil {
+               db.Close()
+               return "", nil, err
+       }
+       return u.String(), db, nil
+}
+
+type finaldb struct{ db *sql.DB }
+
+func (f finaldb) finalizeTx(tx *sql.Tx) {
+       go func() { // don't block the finalizer goroutine for too long
+               err := tx.Rollback()
+               if err != nil {
+                       // If the tx has been committed (or if anything
+                       // else goes wrong), we can't reuse db.
+                       f.db.Close()
+                       return
+               }
+               select {
+               case dbpool <- f.db:
+               default:
+                       f.db.Close() // pool is full
+               }
+       }()
+}
+
+func getdb(ctx context.Context, url, path string) (*sql.DB, error) {
+       select {
+       case db := <-dbpool:
+               return db, nil
+       default:
+               _, db, err := open(ctx, url, path)
+               return db, err
+       }
+}
+
+func gcdbs(db *sql.DB) error {
+       gcTime := time.Now().Add(-gcDur)
+       const q = `
+               SELECT datname FROM pg_database
+               WHERE datname LIKE 'pgtest_%' AND datname < $1
+       `
+       rows, err := db.Query(q, formatPrefix("db", gcTime))
+       if err != nil {
+               return err
+       }
+       var names []string
+       for rows.Next() {
+               var name string
+               err = rows.Scan(&name)
+               if err != nil {
+                       return err
+               }
+               names = append(names, name)
+       }
+       if rows.Err() != nil {
+               return rows.Err()
+       }
+       for i, name := range names {
+               if i > 5 {
+                       break // drop up to six databases per test (indexes 0 through 5)
+               }
+               go db.Exec("DROP DATABASE " + pq.QuoteIdentifier(name))
+       }
+       return nil
+}
+
+func pickName(prefix string) (s string) {
+       const chars = "abcdefghijklmnopqrstuvwxyz"
+       for i := 0; i < 10; i++ {
+               s += string(chars[random.Intn(len(chars))])
+       }
+       return formatPrefix(prefix, time.Now()) + s
+}
+
+func formatPrefix(prefix string, t time.Time) string {
+       return "pgtest_" + prefix + "_" + t.UTC().Format(timeFormat) + "Z_"
+}
+
+// Exec executes q in the database or transaction in ctx.
+// If there is an error, it fails t.
+func Exec(ctx context.Context, db pg.DB, t testing.TB, q string, args ...interface{}) {
+       _, err := db.ExecContext(ctx, q, args...)
+       if err != nil {
+               testutil.FatalErr(t, err)
+       }
+}
+
+// Fataler lets NewTx and NewDB signal immediate failure.
+// It is satisfied by *testing.T, *testing.B, and *log.Logger.
+type Fataler interface {
+       Fatal(...interface{})
+}
diff --git a/database/pg/pgtest/wrap.go b/database/pg/pgtest/wrap.go
new file mode 100644 (file)
index 0000000..07366b7
--- /dev/null
@@ -0,0 +1,63 @@
+package pgtest
+
+import (
+       "database/sql"
+       "database/sql/driver"
+       "runtime"
+       "testing"
+
+       "github.com/lib/pq"
+)
+
+// WrapDB opens a new connection to the database at the provided URL,
+// but with a driver that calls wrapFn on every driver.Stmt.Exec call
+// and driver.Stmt.Query call.
+//
+// It also registers a finalizer for the DB, so callers can discard
+// it without closing it explicitly.
+func WrapDB(t testing.TB, url string, wrapFn func(string)) *sql.DB {
+       // Register a new SQL driver that will wrapFn on every driver.Stmt
+       // Exec and Query call.
+       driverName := pickName("wrappeddriver")
+       sql.Register(driverName, &wrappedDriver{fn: wrapFn})
+       db, err := sql.Open(driverName, url)
+       if err != nil {
+               t.Fatal(err)
+       }
+       runtime.SetFinalizer(db, (*sql.DB).Close)
+       return db
+}
+
+type wrappedDriver struct {
+       fn func(string)
+}
+
+func (d *wrappedDriver) Open(name string) (driver.Conn, error) {
+       conn, err := pq.Open(name)
+       if err != nil {
+               return conn, err
+       }
+       return wrappedConn{fn: d.fn, backing: conn}, nil
+}
+
+type wrappedConn struct {
+       fn      func(string)
+       backing driver.Conn
+}
+
+func (c wrappedConn) Prepare(query string) (driver.Stmt, error) {
+       c.fn(query)
+       stmt, err := c.backing.Prepare(query)
+       if err != nil {
+               return stmt, err
+       }
+       return stmt, nil
+}
+
+func (c wrappedConn) Close() error {
+       return c.backing.Close()
+}
+
+func (c wrappedConn) Begin() (driver.Tx, error) {
+       return c.backing.Begin()
+}
diff --git a/database/pg/query.go b/database/pg/query.go
new file mode 100644 (file)
index 0000000..2472bfb
--- /dev/null
@@ -0,0 +1,105 @@
+package pg
+
+import (
+       "context"
+       "reflect"
+
+       "chain/errors"
+)
+
+var ErrBadRequest = errors.New("bad request")
+
+// The type of "error"
+var errorInterface = reflect.TypeOf((*error)(nil)).Elem()
+
+// ForQueryRows encapsulates a lot of boilerplate when making db queries.
+// Call it like this:
+//
+//   err = ForQueryRows(ctx, db, query, queryArg1, queryArg2, ..., func(scanVar1 type1, scanVar2 type2, ...) {
+//     ...process a row from the result...
+//   })
+//
+// This is equivalent to:
+//
+//   rows, err = db.Query(ctx, query, queryArg1, queryArg2, ...)
+//   if err != nil {
+//     return err
+//   }
+//   defer rows.Close()
+//   for rows.Next() {
+//     var (
+//       scanVar1 type1
+//       scanVar2 type2
+//     )
+//     err = rows.Scan(&scanVar1, &scanVar2, ...)
+//     if err != nil {
+//       return err
+//     }
+//     ...process a row from the result...
+//   }
+//   if err = rows.Err(); err != nil {
+//     return err
+//   }
+//
+// The callback is invoked once for each row in the result.  The
+// number and types of parameters to the callback must match the
+// values to be scanned with rows.Scan.  The space for the callback's
+// arguments is not reused between calls.  The callback may return a
+// single error-type value.  If any invocation yields a non-nil
+// result, ForQueryRows will abort and return it.
+func ForQueryRows(ctx context.Context, db DB, query string, args ...interface{}) error {
+       if len(args) == 0 {
+               return errors.Wrap(ErrBadRequest, "too few arguments")
+       }
+
+       fnArg := args[len(args)-1]
+       queryArgs := args[:len(args)-1]
+
+       fnType := reflect.TypeOf(fnArg)
+       if fnType.Kind() != reflect.Func {
+               return errors.Wrap(ErrBadRequest, "fn arg not a function")
+       }
+       if fnType.NumOut() > 1 {
+               return errors.Wrap(ErrBadRequest, "fn arg must return 0 values or 1")
+       }
+       if fnType.NumOut() == 1 && !fnType.Out(0).Implements(errorInterface) {
+               return errors.Wrap(ErrBadRequest, "fn arg return type must be error")
+       }
+
+       rows, err := db.QueryContext(ctx, query, queryArgs...)
+       if err != nil {
+               return errors.Wrap(err, "query")
+       }
+       defer rows.Close()
+
+       fnVal := reflect.ValueOf(fnArg)
+
+       argPtrVals := make([]reflect.Value, 0, fnType.NumIn())
+       scanArgs := make([]interface{}, 0, fnType.NumIn())
+       fnArgs := make([]reflect.Value, 0, fnType.NumIn())
+
+       for rows.Next() {
+               argPtrVals = argPtrVals[:0]
+               scanArgs = scanArgs[:0]
+               fnArgs = fnArgs[:0]
+               for i := 0; i < fnType.NumIn(); i++ {
+                       argType := fnType.In(i)
+                       argPtrVal := reflect.New(argType)
+                       argPtrVals = append(argPtrVals, argPtrVal)
+                       scanArgs = append(scanArgs, argPtrVal.Interface())
+               }
+               err = rows.Scan(scanArgs...)
+               if err != nil {
+                       return errors.Wrap(err, "scan")
+               }
+               for _, argPtrVal := range argPtrVals {
+                       fnArgs = append(fnArgs, argPtrVal.Elem())
+               }
+               res := fnVal.Call(fnArgs)
+               if fnType.NumOut() == 1 && !res[0].IsNil() {
+                       return errors.Wrap(res[0].Interface().(error), "callback")
+               }
+       }
+
+       return errors.Wrap(rows.Err(), "end scan")
+}
diff --git a/database/sinkdb/internal/sinkpb/gen.go b/database/sinkdb/internal/sinkpb/gen.go
new file mode 100644 (file)
index 0000000..1b46524
--- /dev/null
@@ -0,0 +1,3 @@
+package sinkpb
+
+//go:generate protoc --go_out=. op.proto snapshot.proto
diff --git a/database/sinkdb/internal/sinkpb/op.pb.go b/database/sinkdb/internal/sinkpb/op.pb.go
new file mode 100644 (file)
index 0000000..b336baa
--- /dev/null
@@ -0,0 +1,216 @@
+// Code generated by protoc-gen-go.
+// source: op.proto
+// DO NOT EDIT!
+
+/*
+Package sinkpb is a generated protocol buffer package.
+
+It is generated from these files:
+       op.proto
+       snapshot.proto
+
+It has these top-level messages:
+       Op
+       Cond
+       Instruction
+       Snapshot
+*/
+package sinkpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Op_Type int32
+
+const (
+       Op_SET    Op_Type = 0
+       Op_DELETE Op_Type = 1
+)
+
+var Op_Type_name = map[int32]string{
+       0: "SET",
+       1: "DELETE",
+}
+var Op_Type_value = map[string]int32{
+       "SET":    0,
+       "DELETE": 1,
+}
+
+func (x Op_Type) String() string {
+       return proto.EnumName(Op_Type_name, int32(x))
+}
+func (Op_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+
+type Cond_Type int32
+
+const (
+       Cond_KEY_EXISTS      Cond_Type = 0
+       Cond_NOT_KEY_EXISTS  Cond_Type = 1
+       Cond_VALUE_EQUAL     Cond_Type = 2
+       Cond_NOT_VALUE_EQUAL Cond_Type = 3
+       Cond_INDEX_EQUAL     Cond_Type = 4
+       Cond_NOT_INDEX_EQUAL Cond_Type = 5
+)
+
+var Cond_Type_name = map[int32]string{
+       0: "KEY_EXISTS",
+       1: "NOT_KEY_EXISTS",
+       2: "VALUE_EQUAL",
+       3: "NOT_VALUE_EQUAL",
+       4: "INDEX_EQUAL",
+       5: "NOT_INDEX_EQUAL",
+}
+var Cond_Type_value = map[string]int32{
+       "KEY_EXISTS":      0,
+       "NOT_KEY_EXISTS":  1,
+       "VALUE_EQUAL":     2,
+       "NOT_VALUE_EQUAL": 3,
+       "INDEX_EQUAL":     4,
+       "NOT_INDEX_EQUAL": 5,
+}
+
+func (x Cond_Type) String() string {
+       return proto.EnumName(Cond_Type_name, int32(x))
+}
+func (Cond_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+
+type Op struct {
+       Type  Op_Type `protobuf:"varint,1,opt,name=type,enum=sinkpb.Op_Type" json:"type,omitempty"`
+       Key   string  `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+       Value []byte  `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *Op) Reset()                    { *m = Op{} }
+func (m *Op) String() string            { return proto.CompactTextString(m) }
+func (*Op) ProtoMessage()               {}
+func (*Op) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Op) GetType() Op_Type {
+       if m != nil {
+               return m.Type
+       }
+       return Op_SET
+}
+
+func (m *Op) GetKey() string {
+       if m != nil {
+               return m.Key
+       }
+       return ""
+}
+
+func (m *Op) GetValue() []byte {
+       if m != nil {
+               return m.Value
+       }
+       return nil
+}
+
+type Cond struct {
+       Type  Cond_Type `protobuf:"varint,1,opt,name=type,enum=sinkpb.Cond_Type" json:"type,omitempty"`
+       Key   string    `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+       Value []byte    `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+       Index uint64    `protobuf:"varint,4,opt,name=index" json:"index,omitempty"`
+}
+
+func (m *Cond) Reset()                    { *m = Cond{} }
+func (m *Cond) String() string            { return proto.CompactTextString(m) }
+func (*Cond) ProtoMessage()               {}
+func (*Cond) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Cond) GetType() Cond_Type {
+       if m != nil {
+               return m.Type
+       }
+       return Cond_KEY_EXISTS
+}
+
+func (m *Cond) GetKey() string {
+       if m != nil {
+               return m.Key
+       }
+       return ""
+}
+
+func (m *Cond) GetValue() []byte {
+       if m != nil {
+               return m.Value
+       }
+       return nil
+}
+
+func (m *Cond) GetIndex() uint64 {
+       if m != nil {
+               return m.Index
+       }
+       return 0
+}
+
+type Instruction struct {
+       Conditions []*Cond `protobuf:"bytes,1,rep,name=conditions" json:"conditions,omitempty"`
+       Operations []*Op   `protobuf:"bytes,2,rep,name=operations" json:"operations,omitempty"`
+}
+
+func (m *Instruction) Reset()                    { *m = Instruction{} }
+func (m *Instruction) String() string            { return proto.CompactTextString(m) }
+func (*Instruction) ProtoMessage()               {}
+func (*Instruction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *Instruction) GetConditions() []*Cond {
+       if m != nil {
+               return m.Conditions
+       }
+       return nil
+}
+
+func (m *Instruction) GetOperations() []*Op {
+       if m != nil {
+               return m.Operations
+       }
+       return nil
+}
+
+func init() {
+       proto.RegisterType((*Op)(nil), "sinkpb.Op")
+       proto.RegisterType((*Cond)(nil), "sinkpb.Cond")
+       proto.RegisterType((*Instruction)(nil), "sinkpb.Instruction")
+       proto.RegisterEnum("sinkpb.Op_Type", Op_Type_name, Op_Type_value)
+       proto.RegisterEnum("sinkpb.Cond_Type", Cond_Type_name, Cond_Type_value)
+}
+
+func init() { proto.RegisterFile("op.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+       // 300 bytes of a gzipped FileDescriptorProto
+       0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0x51, 0x6b, 0xf2, 0x30,
+       0x14, 0x86, 0x4d, 0x1b, 0xfd, 0xbe, 0x1d, 0x45, 0xbb, 0xb3, 0x5d, 0x14, 0x76, 0x53, 0x3a, 0x06,
+       0x65, 0x8c, 0x5e, 0xb8, 0x5f, 0x20, 0x33, 0x17, 0x32, 0x51, 0x16, 0xeb, 0x70, 0x57, 0xa2, 0x36,
+       0x8c, 0xe2, 0x48, 0x42, 0x5b, 0x65, 0xfd, 0xbd, 0xfb, 0x23, 0x23, 0xad, 0xb2, 0xec, 0x76, 0x77,
+       0x39, 0xef, 0xf3, 0x84, 0x37, 0xe1, 0xc0, 0x7f, 0xa5, 0x63, 0x9d, 0xab, 0x52, 0x61, 0xa7, 0xc8,
+       0xe4, 0x5e, 0x6f, 0x43, 0x09, 0xce, 0x5c, 0xe3, 0x2d, 0xd0, 0xb2, 0xd2, 0xc2, 0x27, 0x01, 0x89,
+       0xfa, 0xc3, 0x41, 0xdc, 0xc0, 0x78, 0xae, 0xe3, 0xa4, 0xd2, 0x82, 0xd7, 0x10, 0x3d, 0x70, 0xf7,
+       0xa2, 0xf2, 0x9d, 0x80, 0x44, 0x17, 0xdc, 0x1c, 0xf1, 0x1a, 0xda, 0xc7, 0xcd, 0xc7, 0x41, 0xf8,
+       0x6e, 0x40, 0xa2, 0x1e, 0x6f, 0x86, 0xf0, 0x06, 0xa8, 0xb9, 0x85, 0xff, 0xc0, 0x5d, 0xb0, 0xc4,
+       0x6b, 0x21, 0x40, 0x67, 0xcc, 0xa6, 0x2c, 0x61, 0x1e, 0x09, 0xbf, 0x08, 0xd0, 0x27, 0x25, 0x53,
+       0xbc, 0xfb, 0x55, 0x79, 0x79, 0xae, 0x34, 0xec, 0x0f, 0xa5, 0x26, 0xcd, 0x64, 0x2a, 0x3e, 0x7d,
+       0x1a, 0x90, 0x88, 0xf2, 0x66, 0x08, 0x8f, 0xa7, 0xa7, 0xf4, 0x01, 0x9e, 0xd9, 0xdb, 0x9a, 0xad,
+       0x26, 0x8b, 0x64, 0xe1, 0xb5, 0x10, 0xa1, 0x3f, 0x9b, 0x27, 0x6b, 0x2b, 0x23, 0x38, 0x80, 0xee,
+       0xeb, 0x68, 0xba, 0x64, 0x6b, 0xf6, 0xb2, 0x1c, 0x4d, 0x3d, 0x07, 0xaf, 0x60, 0x60, 0x24, 0x3b,
+       0x74, 0x8d, 0x35, 0x99, 0x8d, 0xd9, 0xea, 0x14, 0xd0, 0xb3, 0x65, 0x87, 0xed, 0xf0, 0x1d, 0xba,
+       0x13, 0x59, 0x94, 0xf9, 0x61, 0x57, 0x66, 0x4a, 0xe2, 0x03, 0xc0, 0x4e, 0xc9, 0x34, 0x33, 0x43,
+       0xe1, 0x93, 0xc0, 0x8d, 0xba, 0xc3, 0x9e, 0xfd, 0x63, 0x6e, 0x71, 0xbc, 0x07, 0x50, 0x5a, 0xe4,
+       0x9b, 0xc6, 0x76, 0x6a, 0x1b, 0x7e, 0x56, 0xc2, 0x2d, 0xba, 0xed, 0xd4, 0xdb, 0x7c, 0xfc, 0x0e,
+       0x00, 0x00, 0xff, 0xff, 0xde, 0x11, 0x48, 0x9b, 0xd9, 0x01, 0x00, 0x00,
+}
diff --git a/database/sinkdb/internal/sinkpb/op.proto b/database/sinkdb/internal/sinkpb/op.proto
new file mode 100644 (file)
index 0000000..55e3e6f
--- /dev/null
@@ -0,0 +1,33 @@
+syntax = "proto3";
+
+package sinkpb;
+
+message Op {
+       enum Type {
+               SET = 0;
+               DELETE = 1;
+       }
+       Type type = 1;
+       string key = 2;
+       bytes value = 3;
+}
+
+message Cond {
+       enum Type {
+               KEY_EXISTS = 0;
+               NOT_KEY_EXISTS = 1;
+               VALUE_EQUAL = 2;
+               NOT_VALUE_EQUAL = 3;
+               INDEX_EQUAL = 4;
+               NOT_INDEX_EQUAL = 5;
+       }
+       Type type = 1;
+       string key = 2;
+       bytes value = 3;
+       uint64 index = 4;
+}
+
+message Instruction {
+       repeated Cond conditions = 1;
+       repeated Op operations = 2;
+}
diff --git a/database/sinkdb/internal/sinkpb/snapshot.pb.go b/database/sinkdb/internal/sinkpb/snapshot.pb.go
new file mode 100644 (file)
index 0000000..7d4c9f8
--- /dev/null
@@ -0,0 +1,69 @@
+// Code generated by protoc-gen-go.
+// source: snapshot.proto
+// DO NOT EDIT!
+
+package sinkpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Snapshot struct {
+       Version map[string]uint64 `protobuf:"bytes,1,rep,name=version" json:"version,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+       State   map[string][]byte `protobuf:"bytes,2,rep,name=state" json:"state,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+       Peers   map[uint64]string `protobuf:"bytes,3,rep,name=peers" json:"peers,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Snapshot) Reset()                    { *m = Snapshot{} }
+func (m *Snapshot) String() string            { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()               {}
+func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
+
+func (m *Snapshot) GetVersion() map[string]uint64 {
+       if m != nil {
+               return m.Version
+       }
+       return nil
+}
+
+func (m *Snapshot) GetState() map[string][]byte {
+       if m != nil {
+               return m.State
+       }
+       return nil
+}
+
+func (m *Snapshot) GetPeers() map[uint64]string {
+       if m != nil {
+               return m.Peers
+       }
+       return nil
+}
+
+func init() {
+       proto.RegisterType((*Snapshot)(nil), "sinkpb.Snapshot")
+}
+
+func init() { proto.RegisterFile("snapshot.proto", fileDescriptor1) }
+
+var fileDescriptor1 = []byte{
+       // 204 bytes of a gzipped FileDescriptorProto
+       0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0xce, 0x4b, 0x2c,
+       0x28, 0xce, 0xc8, 0x2f, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xce, 0xcc, 0xcb,
+       0x2e, 0x48, 0x52, 0xba, 0xc9, 0xc4, 0xc5, 0x11, 0x0c, 0x95, 0x12, 0x32, 0xe7, 0x62, 0x2f, 0x4b,
+       0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x93, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x92, 0xd5, 0x83, 0x28,
+       0xd3, 0x83, 0x29, 0xd1, 0x0b, 0x83, 0xc8, 0xbb, 0xe6, 0x95, 0x14, 0x55, 0x06, 0xc1, 0x54, 0x0b,
+       0x19, 0x72, 0xb1, 0x16, 0x97, 0x24, 0x96, 0xa4, 0x4a, 0x30, 0x81, 0xb5, 0x49, 0x63, 0x68, 0x0b,
+       0x06, 0xc9, 0x42, 0x34, 0x41, 0x54, 0x82, 0xb4, 0x14, 0xa4, 0xa6, 0x16, 0x15, 0x4b, 0x30, 0xe3,
+       0xd0, 0x12, 0x00, 0x92, 0x85, 0x6a, 0x01, 0xab, 0x94, 0xb2, 0xe2, 0xe2, 0x41, 0xb6, 0x5e, 0x48,
+       0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc4, 0x14, 0x12,
+       0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x05, 0xb9, 0x83, 0x51, 0x83, 0x25, 0x08, 0xc2, 0xb1, 0x62,
+       0xb2, 0x60, 0x94, 0xb2, 0xe0, 0xe2, 0x42, 0xb8, 0x81, 0x90, 0x4e, 0x1e, 0x34, 0x9d, 0x08, 0xa7,
+       0x20, 0xeb, 0x64, 0xc1, 0xa2, 0x93, 0x13, 0x49, 0x67, 0x12, 0x1b, 0x38, 0xa8, 0x8d, 0x01, 0x01,
+       0x00, 0x00, 0xff, 0xff, 0xc0, 0x2b, 0x23, 0xf5, 0x7c, 0x01, 0x00, 0x00,
+}
diff --git a/database/sinkdb/internal/sinkpb/snapshot.proto b/database/sinkdb/internal/sinkpb/snapshot.proto
new file mode 100644 (file)
index 0000000..2ab5e33
--- /dev/null
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+package sinkpb;
+
+message Snapshot {
+       map<string, uint64> version = 1;
+       map<string, bytes> state = 2;
+       map<uint64, string> peers = 3;
+}
diff --git a/database/sinkdb/op.go b/database/sinkdb/op.go
new file mode 100644 (file)
index 0000000..54eaa7d
--- /dev/null
@@ -0,0 +1,110 @@
+package sinkdb
+
+import (
+       "github.com/golang/protobuf/proto"
+
+       "chain/database/sinkdb/internal/sinkpb"
+)
+
+// Op represents a change to the data store.
+// Each Op starts with conditions, boolean predicates over
+// existing stored data.
+// If all conditions return true, the Op is said to be satisfied.
+// It then results in zero or more effects,
+// mutations to apply to the data.
+// If an Op is unsatisfied, it has no effect.
+// The zero value of Op is a valid operation
+// with no conditions (it is always satisfied)
+// and no effects.
+type Op struct {
+       err     error
+       conds   []*sinkpb.Cond
+       effects []*sinkpb.Op
+}
+
+// All encodes the atomic application of all its arguments.
+//
+// The returned Op is satisfied if all arguments would be satisfied.
+// Its effects (if satisfied) are the effects of the arguments.
+func All(op ...Op) Op {
+       var outer Op
+       for _, inner := range op {
+               if inner.err != nil {
+                       return inner
+               }
+               outer.conds = append(outer.conds, inner.conds...)
+               outer.effects = append(outer.effects, inner.effects...)
+       }
+       return outer
+}
+
+// IfNotExists encodes a conditional to make an instruction
+// successful only if the provided key does not exist.
+func IfNotExists(key string) Op {
+       return Op{
+               conds: []*sinkpb.Cond{{
+                       Type: sinkpb.Cond_NOT_KEY_EXISTS,
+                       Key:  key,
+               }},
+       }
+}
+
+// IfNotModified encodes a conditional to make an instruction
+// successful only if the version stored in v's key matches v.
+//
+// If v.Exists() is false,
+// IfNotModified(v) is equivalent to IfNotExists(v.Key()).
+func IfNotModified(v Version) Op {
+       return Op{
+               conds: []*sinkpb.Cond{{
+                       Type:  sinkpb.Cond_INDEX_EQUAL,
+                       Key:   v.key,
+                       Index: v.n,
+               }},
+       }
+}
+
+// Delete encodes a delete operation for key.
+func Delete(key string) Op {
+       return Op{
+               effects: []*sinkpb.Op{{
+                       Type: sinkpb.Op_DELETE,
+                       Key:  key,
+               }},
+       }
+}
+
+// Set encodes a set operation setting key to value.
+func Set(key string, value proto.Message) Op {
+       encodedValue, err := proto.Marshal(value)
+       if err != nil {
+               return Op{err: err}
+       }
+
+       return Op{
+               effects: []*sinkpb.Op{{
+                       Type:  sinkpb.Op_SET,
+                       Key:   key,
+                       Value: encodedValue,
+               }},
+       }
+}
+
+// AddAllowedMember configures sinkdb to allow the provided address
+// to participate in Raft.
+func AddAllowedMember(addr string) Op {
+       return Op{
+               effects: []*sinkpb.Op{{
+                       Key:   allowedMemberPrefix + "/" + addr,
+                       Value: []byte{0x01},
+               }},
+       }
+}
+
+// Error returns an Op representing an error condition.
+// Exec will return err, and have no effect,
+// when the returned Op is executed.
+// If err is nil, Error returns the zero Op.
+func Error(err error) Op {
+       return Op{err: err}
+}
diff --git a/database/sinkdb/sinkdb.go b/database/sinkdb/sinkdb.go
new file mode 100644 (file)
index 0000000..d43541f
--- /dev/null
@@ -0,0 +1,127 @@
+// Package sinkdb provides a strongly consistent key-value store.
+package sinkdb
+
+import (
+       "context"
+       "net/http"
+       "sort"
+       "sync"
+       "time"
+
+       "github.com/golang/protobuf/proto"
+
+       "chain/database/sinkdb/internal/sinkpb"
+       "chain/errors"
+       "chain/net/raft"
+)
+
+// ErrConflict is returned by Exec when an instruction was
+// not completed because its preconditions were not met.
+var ErrConflict = errors.New("transaction conflict")
+
+// Open initializes the key-value store and returns a database handle.
+func Open(laddr, dir string, httpClient *http.Client) (*DB, error) {
+       state := newState()
+       sv, err := raft.Start(laddr, dir, httpClient, state)
+       if err != nil {
+               return nil, err
+       }
+       db := &DB{state: state, raft: sv}
+       return db, nil
+}
+
+// DB provides access to an opened kv store.
+type DB struct {
+       mu     sync.Mutex
+       closed bool
+
+       state *state
+       raft  *raft.Service
+}
+
+// Ping performs an empty write to verify the connection to
+// the rest of the cluster.
+func (db *DB) Ping() error {
+       const timeout = 5 * time.Second
+       ctx, cancel := context.WithTimeout(context.Background(), timeout)
+       defer cancel()
+
+       _, err := db.raft.Exec(ctx, db.state.EmptyWrite())
+       return err
+}
+
+// Close closes the database handle releasing its resources. It is
+// the caller's responsibility to ensure that there are no concurrent
+// database operations in flight. Close is idempotent.
+//
+// All other methods have undefined behavior on a closed DB.
+func (db *DB) Close() error {
+       db.mu.Lock()
+       defer db.mu.Unlock()
+       if db.closed { // make Close idempotent
+               return nil
+       }
+       db.closed = true
+       return db.raft.Stop()
+}
+
+// Exec executes the provided operations
+// after combining them with All.
+func (db *DB) Exec(ctx context.Context, ops ...Op) error {
+       all := All(ops...)
+       if all.err != nil {
+               return all.err
+       }
+
+       // Disallow multiple writes to the same key.
+       sort.Slice(all.effects, func(i, j int) bool {
+               return all.effects[i].Key < all.effects[j].Key
+       })
+       var lastKey string
+       for _, e := range all.effects {
+               if e.Key == lastKey {
+                       err := errors.New("duplicate write")
+                       return errors.Wrap(err, e.Key)
+               }
+               lastKey = e.Key
+       }
+
+       encoded, err := proto.Marshal(&sinkpb.Instruction{
+               Conditions: all.conds,
+               Operations: all.effects,
+       })
+       if err != nil {
+               return err
+       }
+       satisfied, err := db.raft.Exec(ctx, encoded)
+       if err != nil {
+               return err
+       }
+       if !satisfied {
+               return ErrConflict
+       }
+       return nil
+}
+
+// Get performs a linearizable read of the provided key. The
+// read value is unmarshalled into v.
+func (db *DB) Get(ctx context.Context, key string, v proto.Message) (Version, error) {
+       err := db.raft.WaitRead(ctx)
+       if err != nil {
+               return Version{}, err
+       }
+       buf, ver := db.state.get(key)
+       return ver, proto.Unmarshal(buf, v)
+}
+
+// GetStale performs a non-linearizable read of the provided key.
+// The value may be stale. The read value is unmarshalled into v.
+func (db *DB) GetStale(key string, v proto.Message) (Version, error) {
+       buf, ver := db.state.get(key) // read directly from state
+       return ver, proto.Unmarshal(buf, v)
+}
+
+// RaftService returns the raft service used for replication.
+func (db *DB) RaftService() *raft.Service {
+       return db.raft
+}
diff --git a/database/sinkdb/sinkdb_test.go b/database/sinkdb/sinkdb_test.go
new file mode 100644 (file)
index 0000000..473d4a3
--- /dev/null
@@ -0,0 +1,56 @@
+package sinkdb
+
+import (
+       "context"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "testing"
+)
+
+func TestRestartDB(t *testing.T) {
+       ctx := context.Background()
+
+       raftDir, err := ioutil.TempDir("", "sinkdb")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer os.RemoveAll(raftDir)
+
+       // Create a new fresh db and add an allowed member.
+       sdb1, err := Open("", raftDir, new(http.Client))
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer sdb1.Close()
+       err = sdb1.RaftService().Init()
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = sdb1.Exec(ctx, AddAllowedMember("1234"))
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = sdb1.Close()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Re-open the database and verify that the write is still there.
+       sdb2, err := Open("", raftDir, new(http.Client))
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer sdb2.Close()
+       err = sdb2.RaftService().WaitRead(ctx)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if !sdb2.state.IsAllowedMember("1234") {
+               t.Error("expected allowed member to be persisted, but it wasn't")
+       }
+       err = sdb2.Close()
+       if err != nil {
+               t.Fatal(err)
+       }
+}
diff --git a/database/sinkdb/sinkdbtest/sinkdbtest.go b/database/sinkdb/sinkdbtest/sinkdbtest.go
new file mode 100644 (file)
index 0000000..931a875
--- /dev/null
@@ -0,0 +1,54 @@
+package sinkdbtest
+
+import (
+       "io/ioutil"
+       "net/http"
+       "os"
+       "path/filepath"
+       "runtime"
+       "strings"
+       "testing"
+       "time"
+
+       "chain/database/sinkdb"
+)
+
+const dataDirectoryPrefix = `chain-syncdbtest`
+
+// NewDB creates a new sinkdb instance with a random temporary
+// storage directory and a new single-node raft cluster.
+func NewDB(t testing.TB) *sinkdb.DB {
+       gcDataDirectories() // clean up old data directories from previous tests
+
+       tempDir, err := ioutil.TempDir("", dataDirectoryPrefix)
+       if err != nil {
+               t.Fatal(err)
+       }
+       sdb, err := sinkdb.Open("", tempDir, new(http.Client))
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = sdb.RaftService().Init()
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // set a finalizer to close the DB to reclaim file descriptors, etc.
+       runtime.SetFinalizer(sdb, (*sinkdb.DB).Close)
+       return sdb
+}
+
+func gcDataDirectories() {
+       tempDir := os.TempDir()
+       cutoff := time.Now().Add(-time.Hour * 24)
+       dirents, _ := ioutil.ReadDir(tempDir)
+       for _, dirent := range dirents {
+               if !strings.HasPrefix(dirent.Name(), dataDirectoryPrefix) {
+                       continue
+               }
+               if dirent.ModTime().After(cutoff) {
+                       continue
+               }
+               os.RemoveAll(filepath.Join(tempDir, dirent.Name()))
+       }
+}
diff --git a/database/sinkdb/state.go b/database/sinkdb/state.go
new file mode 100644 (file)
index 0000000..022b8bf
--- /dev/null
@@ -0,0 +1,233 @@
+package sinkdb
+
+import (
+       "bytes"
+       "sync"
+
+       "github.com/golang/protobuf/proto"
+
+       "chain/database/sinkdb/internal/sinkpb"
+       "chain/errors"
+)
+
const (
	// nextNodeID is the key holding the encoded ID to hand to the
	// next node that joins the cluster (see NextNodeID).
	// NOTE(review): unlike allowedMemberPrefix below, this key has no
	// leading slash — confirm the asymmetry is intentional.
	nextNodeID          = "raft/nextNodeID"
	// allowedMemberPrefix prefixes keys marking addresses allowed to
	// join the cluster (see IsAllowedMember).
	allowedMemberPrefix = "/raft/allowed"
)
+
// state is a general-purpose data store designed to accumulate
// and apply replicated updates from a raft log.
type state struct {
	mu           sync.Mutex        // guards the fields below
	state        map[string][]byte // key -> value
	peers        map[uint64]string // id -> addr
	appliedIndex uint64            // raft log index of the last applied entry
	version      map[string]uint64 // key -> raft log index of the entry that last set it
}
+
+// newState returns a new State.
+func newState() *state {
+       return &state{
+               state:   map[string][]byte{nextNodeID: []byte("2")},
+               peers:   make(map[uint64]string),
+               version: make(map[string]uint64),
+       }
+}
+
// SetAppliedIndex sets the applied index to the provided index.
// It records how far into the raft log this state has been advanced.
func (s *state) SetAppliedIndex(index uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.appliedIndex = index
}
+
// Peers returns the current set of peer nodes. The returned
// map must not be modified.
// SetPeerAddr and RemovePeerAddr replace the map wholesale rather
// than mutating it, so a returned snapshot is never written to
// concurrently.
func (s *state) Peers() map[uint64]string {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.peers
}
+
+// SetPeerAddr sets the address for the given peer.
+func (s *state) SetPeerAddr(id uint64, addr string) {
+       newPeers := make(map[uint64]string)
+       s.mu.Lock()
+       defer s.mu.Unlock()
+       for nodeID, addr := range s.peers {
+               newPeers[nodeID] = addr
+       }
+       newPeers[id] = addr
+       s.peers = newPeers
+}
+
+// RemovePeerAddr deletes the current address for the given peer if it exists.
+func (s *state) RemovePeerAddr(id uint64) {
+       newPeers := make(map[uint64]string)
+       s.mu.Lock()
+       defer s.mu.Unlock()
+       for nodeID, addr := range s.peers {
+               if nodeID == id {
+                       continue
+               }
+               newPeers[nodeID] = addr
+       }
+       s.peers = newPeers
+}
+
+// RestoreSnapshot decodes data and overwrites the contents of s.
+// It should be called with the retrieved snapshot
+// when bootstrapping a new node from an existing cluster
+// or when recovering from a file on disk.
+func (s *state) RestoreSnapshot(data []byte, index uint64) error {
+       s.mu.Lock()
+       defer s.mu.Unlock()
+
+       s.appliedIndex = index
+       //TODO (ameets): think about having sinkpb in state for restore
+       snapshot := &sinkpb.Snapshot{}
+       err := proto.Unmarshal(data, snapshot)
+       s.peers = snapshot.Peers
+       s.state = snapshot.State
+       s.version = snapshot.Version
+       return errors.Wrap(err)
+}
+
+// Snapshot returns an encoded copy of s suitable for RestoreSnapshot.
+func (s *state) Snapshot() ([]byte, uint64, error) {
+       s.mu.Lock()
+       defer s.mu.Unlock()
+
+       data, err := proto.Marshal(&sinkpb.Snapshot{
+               Version: s.version,
+               State:   s.state,
+               Peers:   s.peers,
+       })
+       return data, s.appliedIndex, errors.Wrap(err)
+}
+
+// Apply applies a raft log entry payload to s. For conditional operations, it
+// returns whether the condition was satisfied.
+func (s *state) Apply(data []byte, index uint64) (satisfied bool) {
+       s.mu.Lock()
+       defer s.mu.Unlock()
+
+       if index < s.appliedIndex {
+               panic(errors.New("entry already applied"))
+       }
+       instr := &sinkpb.Instruction{}
+       err := proto.Unmarshal(data, instr)
+       if err != nil {
+               // An error here indicates a malformed update
+               // was written to the raft log. We do version
+               // negotiation in the transport layer, so this
+               // should be impossible; by this point, we are
+               // all speaking the same version.
+               panic(err)
+       }
+
+       s.appliedIndex = index
+       for _, cond := range instr.Conditions {
+               y := true
+               switch cond.Type {
+
+               case sinkpb.Cond_NOT_KEY_EXISTS:
+                       y = false
+                       fallthrough
+               case sinkpb.Cond_KEY_EXISTS:
+                       if _, ok := s.state[cond.Key]; ok != y {
+                               return false
+                       }
+               case sinkpb.Cond_NOT_VALUE_EQUAL:
+                       y = false
+                       fallthrough
+               case sinkpb.Cond_VALUE_EQUAL:
+                       if ok := bytes.Equal(s.state[cond.Key], cond.Value); ok != y {
+                               return false
+                       }
+               case sinkpb.Cond_NOT_INDEX_EQUAL:
+                       y = false
+                       fallthrough
+               case sinkpb.Cond_INDEX_EQUAL:
+                       if ok := (s.version[cond.Key] == cond.Index); ok != y {
+                               return false
+                       }
+               default:
+                       panic(errors.New("unknown condition type"))
+               }
+       }
+       for _, op := range instr.Operations {
+               switch op.Type {
+               case sinkpb.Op_SET:
+                       s.state[op.Key] = op.Value
+                       s.version[op.Key] = index
+               case sinkpb.Op_DELETE:
+                       delete(s.state, op.Key)
+                       delete(s.version, op.Key)
+               default:
+                       panic(errors.New("unknown operation type"))
+               }
+       }
+       return true
+}
+
+// get performs a provisional read operation.
+func (s *state) get(key string) ([]byte, Version) {
+       s.mu.Lock()
+       defer s.mu.Unlock()
+
+       b, ok := s.state[key]
+       n := s.version[key]
+       return b, Version{key, ok, n}
+}
+
// AppliedIndex returns the raft log index (applied index) of current state
func (s *state) AppliedIndex() uint64 {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.appliedIndex
}
+
// NextNodeID generates an ID for the next node to join the cluster.
// It returns the ID together with the version (raft log index) of the
// counter key, for use in a compare-and-set via IncrementNextNodeID.
func (s *state) NextNodeID() (id, version uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// n == 0 means the stored value is not a valid varint; the
	// counter key is corrupt and there is no way to proceed.
	id, n := proto.DecodeVarint(s.state[nextNodeID])
	if n == 0 {
		panic("raft: cannot decode nextNodeID")
	}
	return id, s.version[nextNodeID]
}
+
// IsAllowedMember reports whether addr has been marked as allowed to
// join the cluster, i.e. whether a key for it exists under
// allowedMemberPrefix.
func (s *state) IsAllowedMember(addr string) bool {
	_, ver := s.get(allowedMemberPrefix + "/" + addr)
	return ver.Exists()
}
+
+func (s *state) IncrementNextNodeID(oldID uint64, index uint64) (instruction []byte) {
+       instruction, _ = proto.Marshal(&sinkpb.Instruction{
+               Conditions: []*sinkpb.Cond{{
+                       Type:  sinkpb.Cond_INDEX_EQUAL,
+                       Key:   nextNodeID,
+                       Index: index,
+               }},
+               Operations: []*sinkpb.Op{{
+                       Type:  sinkpb.Op_SET,
+                       Key:   nextNodeID,
+                       Value: proto.EncodeVarint(oldID + 1),
+               }},
+       })
+       return instruction
+}
+
+func (s *state) EmptyWrite() (instruction []byte) {
+       instruction, _ = proto.Marshal(&sinkpb.Instruction{
+               Operations: []*sinkpb.Op{{
+                       Type:  sinkpb.Op_SET,
+                       Key:   "/dummyWrite",
+                       Value: []byte(""),
+               }}})
+       return instruction
+}
diff --git a/database/sinkdb/state_test.go b/database/sinkdb/state_test.go
new file mode 100644 (file)
index 0000000..7a84ae8
--- /dev/null
@@ -0,0 +1,68 @@
+package sinkdb
+
+import (
+       "context"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "reflect"
+       "testing"
+)
+
// TestRemovePeerAddr verifies a removed peer disappears from the map.
func TestRemovePeerAddr(t *testing.T) {
	s := state{peers: map[uint64]string{1: "1.2.3.4:567"}}
	wantPeers := map[uint64]string{}

	s.RemovePeerAddr(1)
	if !reflect.DeepEqual(s.peers, wantPeers) {
		t.Errorf("RemovePeerAddr(%d) => %v want %v", 1, s.peers, wantPeers)
	}
}

// TestSetPeerAddr verifies a peer address write is visible in the map.
func TestSetPeerAddr(t *testing.T) {
	s := newState()
	wantPeers := map[uint64]string{1: "1.2.3.4:567"}

	s.SetPeerAddr(1, "1.2.3.4:567")
	if !reflect.DeepEqual(s.peers, wantPeers) {
		t.Errorf("s.SetPeerAddr(1, \"1.2.3.4:567\") => %v, want %v", s.peers, wantPeers)
	}
}

// TestGetPeerAddr verifies Peers reflects a prior SetPeerAddr.
func TestGetPeerAddr(t *testing.T) {
	s := newState()
	s.SetPeerAddr(1, "1.2.3.4:567")
	want := map[uint64]string{1: "1.2.3.4:567"}

	got := s.Peers()
	if !reflect.DeepEqual(got, want) {
		t.Errorf("s.GetPeerAddr(1) = %v, want %v", got, want)
	}
}
+
+func TestAllowedMember(t *testing.T) {
+       raftDir, err := ioutil.TempDir("", "sinkdb")
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer os.RemoveAll(raftDir)
+
+       sdb, err := Open("", raftDir, new(http.Client))
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = sdb.RaftService().Init()
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = sdb.Exec(context.Background(), AddAllowedMember("1234"))
+       if err != nil {
+               t.Fatal("unexpected error", err)
+       }
+       if !sdb.state.IsAllowedMember("1234") {
+               t.Fatal("expected 1234 to be a potential member")
+       }
+       if sdb.state.IsAllowedMember("5678") {
+               t.Fatal("expected 5678 to not be a potential member")
+       }
+}
diff --git a/database/sinkdb/version.go b/database/sinkdb/version.go
new file mode 100644 (file)
index 0000000..1f7416c
--- /dev/null
@@ -0,0 +1,23 @@
+package sinkdb
+
// Version records the version of a particular key
// when it is read.
// Every time a key is set, its version changes.
type Version struct {
	key string // the key this version describes
	ok  bool   // whether the key existed at read time
	n   uint64 // raft log index of the entry that last set the key
}

// Exists returns whether v's key exists
// when it was read.
func (v Version) Exists() bool {
	// TODO(jackson): use v.n != 0 once we've backfilled versions
	// for Chain Core 1.2.x snapshots.
	return v.ok
}

// Key returns the key for which v is valid.
func (v Version) Key() string {
	return v.key
}
diff --git a/database/sqlutil/doc.go b/database/sqlutil/doc.go
new file mode 100644 (file)
index 0000000..2ee72ed
--- /dev/null
@@ -0,0 +1,2 @@
+// Package sqlutil provides SQL-related utility functions.
+package sqlutil
diff --git a/database/sqlutil/logdriver.go b/database/sqlutil/logdriver.go
new file mode 100644 (file)
index 0000000..ceb860c
--- /dev/null
@@ -0,0 +1,87 @@
+package sqlutil
+
+import (
+       "context"
+       "database/sql/driver"
+       "fmt"
+
+       "chain/log"
+)
+
+// TODO(kr): many databases—Postgres in particular—report the
+// execution time of each query or statement as measured on the
+// database backend. Find a way to record that timing info in
+// the trace.
+
+const maxArgsLogLen = 20 // bytes
+
+func logQuery(ctx context.Context, query string, args interface{}) {
+       s := fmt.Sprint(args)
+       if len(s) > maxArgsLogLen {
+               s = s[:maxArgsLogLen-3] + "..."
+       }
+       log.Printkv(ctx, "query", query, "args", s)
+}
+
// logDriver wraps a driver.Driver so that connections it opens log
// their queries.
type logDriver struct {
	driver driver.Driver // the wrapped driver
}

// LogDriver returns a Driver that logs each query
// before forwarding it to d.
func LogDriver(d driver.Driver) driver.Driver {
	return &logDriver{d}
}
+
+func (ld *logDriver) Open(name string) (driver.Conn, error) {
+       c, err := ld.driver.Open(name)
+       return &logConn{c}, err
+}
+
// logConn wraps a driver.Conn, logging queries executed through it.
type logConn struct {
	driver.Conn
}
+
+func (lc *logConn) Prepare(query string) (driver.Stmt, error) {
+       stmt, err := lc.Conn.Prepare(query)
+       return &logStmt{query, stmt}, err
+}
+
+func (lc *logConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+       execer, ok := lc.Conn.(driver.Execer)
+       if !ok {
+               return nil, driver.ErrSkip
+       }
+       logQuery(context.Background(), query, args)
+       return execer.Exec(query, args)
+}
+
+func (lc *logConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+       queryer, ok := lc.Conn.(driver.Queryer)
+       if !ok {
+               return nil, driver.ErrSkip
+       }
+       logQuery(context.Background(), query, args)
+       return queryer.Query(query, args)
+}
+
+// TODO(kr): implement context variants
+// (but don't bother until lib/pq does first).
+//func (lc *logConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error)
+//func (lc *logConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error)
+//func (lc *logConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error)
+
// logStmt wraps a driver.Stmt, logging each execution together with
// the query text it was prepared from.
type logStmt struct {
	query string // original query text, kept for logging
	driver.Stmt
}

// Exec logs the statement's query and arguments, then forwards to
// the wrapped statement.
func (ls *logStmt) Exec(args []driver.Value) (driver.Result, error) {
	logQuery(context.Background(), ls.query, args)
	return ls.Stmt.Exec(args)
}

// Query logs the statement's query and arguments, then forwards to
// the wrapped statement.
func (ls *logStmt) Query(args []driver.Value) (driver.Rows, error) {
	logQuery(context.Background(), ls.query, args)
	return ls.Stmt.Query(args)
}
diff --git a/encoding/blockchain/blockchain.go b/encoding/blockchain/blockchain.go
new file mode 100644 (file)
index 0000000..fe803b2
--- /dev/null
@@ -0,0 +1,203 @@
+// Package blockchain provides the tools for encoding
+// data primitives in blockchain structures
+package blockchain
+
+import (
+       "encoding/binary"
+       "errors"
+       "io"
+       "math"
+       "sync"
+
+       "chain/encoding/bufpool"
+)
+
// bufPool holds scratch arrays for varint encoding; 9 bytes fits any
// value the Write* helpers accept (at most 63 bits).
var bufPool = sync.Pool{New: func() interface{} { return new([9]byte) }}

// ErrRange is returned when a value is outside the range representable
// by the requested encoding.
var ErrRange = errors.New("value out of range")
+
// Reader wraps a buffer and provides utilities for decoding
// data primitives in blockchain structures. Its various read
// calls may return a slice of the underlying buffer.
type Reader struct {
	buf []byte // unread portion of the input
}

// NewReader constructs a new reader with the provided bytes. It
// does not create a copy of the bytes, so the caller is responsible
// for copying the bytes if necessary.
func NewReader(b []byte) *Reader {
	return &Reader{buf: b}
}

// Len returns the number of unread bytes.
func (r *Reader) Len() int {
	return len(r.buf)
}

// ReadByte reads and returns the next byte from the input.
//
// It implements the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
	if len(r.buf) == 0 {
		return 0, io.EOF
	}
	next := r.buf[0]
	r.buf = r.buf[1:]
	return next, nil
}

// Read reads up to len(p) bytes into p. It implements
// the io.Reader interface. When the copy consumes the last unread
// byte, io.EOF is reported alongside the count.
func (r *Reader) Read(p []byte) (int, error) {
	n := copy(p, r.buf)
	r.buf = r.buf[n:]
	var err error
	if len(r.buf) == 0 {
		err = io.EOF
	}
	return n, err
}
+
+func ReadVarint31(r *Reader) (uint32, error) {
+       val, err := binary.ReadUvarint(r)
+       if err != nil {
+               return 0, err
+       }
+       if val > math.MaxInt32 {
+               return 0, ErrRange
+       }
+       return uint32(val), nil
+}
+
+func ReadVarint63(r *Reader) (uint64, error) {
+       val, err := binary.ReadUvarint(r)
+       if err != nil {
+               return 0, err
+       }
+       if val > math.MaxInt64 {
+               return 0, ErrRange
+       }
+       return val, nil
+}
+
+func ReadVarstr31(r *Reader) ([]byte, error) {
+       l, err := ReadVarint31(r)
+       if err != nil {
+               return nil, err
+       }
+       if l == 0 {
+               return nil, nil
+       }
+       if int(l) > len(r.buf) {
+               return nil, io.ErrUnexpectedEOF
+       }
+       str := r.buf[:l]
+       r.buf = r.buf[l:]
+       return str, nil
+}
+
// ReadVarstrList reads a varint31 length prefix followed by
// that many varstrs.
func ReadVarstrList(r *Reader) (result [][]byte, err error) {
	nelts, err := ReadVarint31(r)
	if err != nil {
		return nil, err
	}
	if nelts == 0 {
		return nil, nil
	}

	// Read elements one at a time rather than preallocating from the
	// (untrusted, possibly huge) element count. nelts counts down as
	// elements are read; if a read fails, the loop exits with err set
	// and nelts holding the number of elements not yet attempted.
	for ; nelts > 0 && err == nil; nelts-- {
		var s []byte
		s, err = ReadVarstr31(r)
		result = append(result, s)
	}
	// Fewer elements read than remain outstanding means the input was
	// truncated. NOTE(review): this overwrites the element-read error,
	// if any, with io.ErrUnexpectedEOF.
	if len(result) < int(nelts) {
		err = io.ErrUnexpectedEOF
	}
	return result, err
}
+
+// ReadExtensibleString reads a varint31 length prefix and that many
+// bytes from r. It then calls the given function to consume those
+// bytes, returning any unconsumed suffix.
+func ReadExtensibleString(r *Reader, f func(*Reader) error) (suffix []byte, err error) {
+       s, err := ReadVarstr31(r)
+       if err != nil {
+               return nil, err
+       }
+
+       sr := NewReader(s)
+       err = f(sr)
+       if err != nil {
+               return nil, err
+       }
+       return sr.buf, nil
+}
+
+func WriteVarint31(w io.Writer, val uint64) (int, error) {
+       if val > math.MaxInt32 {
+               return 0, ErrRange
+       }
+       buf := bufPool.Get().(*[9]byte)
+       n := binary.PutUvarint(buf[:], val)
+       b, err := w.Write(buf[:n])
+       bufPool.Put(buf)
+       return b, err
+}
+
+func WriteVarint63(w io.Writer, val uint64) (int, error) {
+       if val > math.MaxInt64 {
+               return 0, ErrRange
+       }
+       buf := bufPool.Get().(*[9]byte)
+       n := binary.PutUvarint(buf[:], val)
+       b, err := w.Write(buf[:n])
+       bufPool.Put(buf)
+       return b, err
+}
+
+func WriteVarstr31(w io.Writer, str []byte) (int, error) {
+       n, err := WriteVarint31(w, uint64(len(str)))
+       if err != nil {
+               return n, err
+       }
+       n2, err := w.Write(str)
+       return n + n2, err
+}
+
+// WriteVarstrList writes a varint31 length prefix followed by the
+// elements of l as varstrs.
+func WriteVarstrList(w io.Writer, l [][]byte) (int, error) {
+       n, err := WriteVarint31(w, uint64(len(l)))
+       if err != nil {
+               return n, err
+       }
+       for _, s := range l {
+               n2, err := WriteVarstr31(w, s)
+               n += n2
+               if err != nil {
+                       return n, err
+               }
+       }
+       return n, err
+}
+
+// WriteExtensibleString sends the output of the given function, plus
+// the given suffix, to w, together with a varint31 length prefix.
+func WriteExtensibleString(w io.Writer, suffix []byte, f func(io.Writer) error) (int, error) {
+       buf := bufpool.Get()
+       defer bufpool.Put(buf)
+       err := f(buf)
+       if err != nil {
+               return 0, err
+       }
+       if len(suffix) > 0 {
+               _, err := buf.Write(suffix)
+               if err != nil {
+                       return 0, err
+               }
+       }
+       return WriteVarstr31(w, buf.Bytes())
+}
diff --git a/encoding/blockchain/blockchain_test.go b/encoding/blockchain/blockchain_test.go
new file mode 100644 (file)
index 0000000..0a8a1a9
--- /dev/null
@@ -0,0 +1,375 @@
+package blockchain
+
+import (
+       "bytes"
+       "io"
+       "io/ioutil"
+       "math"
+       "reflect"
+       "testing"
+       "testing/quick"
+
+       "chain/testutil"
+)
+
// BenchmarkReadVarint31 measures decoding a maximal 5-byte varint31.
func BenchmarkReadVarint31(b *testing.B) {
	data := []byte{0xff, 0xff, 0xff, 0xff, 0x01}
	r := NewReader(data)
	for i := 0; i < b.N; i++ {
		r.buf = data // rewind without allocating a new Reader
		ReadVarint31(r)
	}
}

// BenchmarkReadVarint63 measures decoding a maximal 9-byte varint63.
func BenchmarkReadVarint63(b *testing.B) {
	data := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01}
	r := NewReader(data)
	for i := 0; i < b.N; i++ {
		r.buf = data // rewind without allocating a new Reader
		ReadVarint63(r)
	}
}

// BenchmarkWriteVarint31 measures encoding the largest legal varint31.
func BenchmarkWriteVarint31(b *testing.B) {
	n := uint64(math.MaxInt32)
	for i := 0; i < b.N; i++ {
		WriteVarint31(ioutil.Discard, n)
	}
}

// BenchmarkWriteVarint63 measures encoding the largest legal varint63.
func BenchmarkWriteVarint63(b *testing.B) {
	n := uint64(math.MaxInt64)
	for i := 0; i < b.N; i++ {
		WriteVarint63(ioutil.Discard, n)
	}
}
+
// TestVarint31 round-trips varint31 encoding over a table of cases,
// including the out-of-range boundary math.MaxInt32+1.
func TestVarint31(t *testing.T) {
	cases := []struct {
		n       uint64
		want    []byte
		wantErr error
	}{
		{
			n:    0,
			want: []byte{0},
		},
		{
			n:    500,
			want: []byte{0xf4, 0x03},
		},
		{
			n:       math.MaxInt32 + 1,
			wantErr: ErrRange,
		},
	}

	for _, c := range cases {
		b := new(bytes.Buffer)
		n, err := WriteVarint31(b, c.n)
		if c.wantErr != err {
			t.Errorf("WriteVarint31(%d): err %v, want %v", c.n, err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			// Error cases produce no output; skip the round trip.
			continue
		}
		if n != len(c.want) {
			t.Errorf("WriteVarint31(%d): wrote %d byte(s), want %d", c.n, n, len(c.want))
		}
		if !bytes.Equal(c.want, b.Bytes()) {
			t.Errorf("WriteVarint31(%d): got %x, want %x", c.n, b.Bytes(), c.want)
		}
		// Decode the freshly encoded bytes and expect the input value.
		v, err := ReadVarint31(NewReader(b.Bytes()))
		if err != nil {
			t.Fatal(err)
		}
		if uint64(v) != c.n {
			t.Errorf("ReadVarint31 got %d, want %d", v, c.n)
		}
	}
}

// TestVarint63 round-trips varint63 encoding over a table of cases,
// including the out-of-range boundary math.MaxInt64+1.
func TestVarint63(t *testing.T) {
	cases := []struct {
		n       uint64
		want    []byte
		wantErr error
	}{
		{
			n:    0,
			want: []byte{0},
		},
		{
			n:    500,
			want: []byte{0xf4, 0x03},
		},
		{
			n:    math.MaxInt32 + 1,
			want: []byte{0x80, 0x80, 0x80, 0x80, 0x08},
		},
		{
			n:       math.MaxInt64 + 1,
			wantErr: ErrRange,
		},
	}

	for _, c := range cases {
		b := new(bytes.Buffer)
		n, err := WriteVarint63(b, c.n)
		if c.wantErr != err {
			t.Errorf("WriteVarint63(%d): err %v, want %v", c.n, err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			// Error cases produce no output; skip the round trip.
			continue
		}
		if n != len(c.want) {
			t.Errorf("WriteVarint63(%d): wrote %d byte(s), want %d", c.n, n, len(c.want))
		}
		if !bytes.Equal(c.want, b.Bytes()) {
			t.Errorf("WriteVarint63(%d): got %x, want %x", c.n, b.Bytes(), c.want)
		}
		// Decode the freshly encoded bytes and expect the input value.
		v, err := ReadVarint63(NewReader(b.Bytes()))
		if err != nil {
			t.Fatal(err)
		}
		if uint64(v) != c.n {
			t.Errorf("ReadVarint63 got %d, want %d", v, c.n)
		}
	}
}
+
// TestVarstring31 round-trips a short string: encode, compare the
// exact bytes (length prefix 3 plus payload), then decode.
func TestVarstring31(t *testing.T) {
	s := []byte{10, 11, 12}
	b := new(bytes.Buffer)
	_, err := WriteVarstr31(b, s)
	if err != nil {
		t.Fatal(err)
	}
	want := []byte{3, 10, 11, 12}
	if !bytes.Equal(b.Bytes(), want) {
		t.Errorf("got %x, want %x", b.Bytes(), want)
	}
	s, err = ReadVarstr31(NewReader(want))
	if err != nil {
		t.Fatal(err)
	}
	want = []byte{10, 11, 12}
	if !bytes.Equal(s, want) {
		t.Errorf("got %x, expected %x", s, want)
	}
}

// TestEmptyVarstring31 checks the zero-length encoding (a single 0x00
// byte) and that decoding it yields nil.
func TestEmptyVarstring31(t *testing.T) {
	s := []byte{}
	b := new(bytes.Buffer)
	_, err := WriteVarstr31(b, s)
	if err != nil {
		t.Fatal(err)
	}
	want := []byte{0x00}
	if !bytes.Equal(b.Bytes(), want) {
		t.Errorf("got %x, want %x", b.Bytes(), want)
	}

	s, err = ReadVarstr31(NewReader(want))
	if err != nil {
		t.Fatal(err)
	}
	want = nil // we deliberately return nil for empty strings to avoid unnecessary byteslice allocation
	if !bytes.Equal(s, want) {
		t.Errorf("got %x, expected %x", s, want)
	}
}

// TestTooLongVarstring31 tests decoding a varstring31 with a leading
// length too long to fit in memory. Reading such a varstring31 should
// not try to allocate more memory than feasible.
func TestTooLongVarstring31(t *testing.T) {
	var buf bytes.Buffer
	// Claim math.MaxInt32 bytes but supply only three.
	WriteVarint31(&buf, 0x7fffffff)
	buf.Write([]byte{0x01, 0x02, 0x03})

	_, err := ReadVarstr31(NewReader(buf.Bytes()))
	if err != io.ErrUnexpectedEOF {
		t.Errorf("got %s, want io.ErrUnexpectedEOF", err)
	}
}
+
// TestVarstrList round-trips lists of increasing size through
// WriteVarstrList and ReadVarstrList.
func TestVarstrList(t *testing.T) {
	for i := 0; i < 4; i++ {
		// make a list of i+1 strs, each with length i+1, each made of repeating byte i
		strs := make([][]byte, 0, i+1)
		for j := 0; j <= i; j++ {
			str := make([]byte, 0, i+1)
			for k := 0; k <= i; k++ {
				str = append(str, byte(i))
			}
			strs = append(strs, str)
		}
		var buf bytes.Buffer
		_, err := WriteVarstrList(&buf, strs)
		if err != nil {
			t.Fatal(err)
		}
		strs2, err := ReadVarstrList(NewReader(buf.Bytes()))
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(strs, strs2) {
			t.Errorf("got %v, want %v", strs2, strs)
		}
	}
}

// TestVarstrListWithEOF decodes a list whose final element ends
// exactly at the end of input; Reader reports io.EOF together with
// the last bytes, so nil or io.EOF are both acceptable here.
func TestVarstrListWithEOF(t *testing.T) {
	var buf bytes.Buffer
	WriteVarint31(&buf, 3)
	WriteVarstr31(&buf, []byte{0x01})
	WriteVarstr31(&buf, []byte{0x02})
	WriteVarstr31(&buf, []byte{0x03})

	want := [][]byte{[]byte{0x01}, []byte{0x02}, []byte{0x03}}
	got, err := ReadVarstrList(NewReader(buf.Bytes()))
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %#v, want %#v", got, want)
	}
	if err != nil && err != io.EOF {
		t.Errorf("got %s want nil or io.EOF", err)
	}
}
+
+// TestTooLongVarstrList tests decoding a VarstrList that has a leading
+// element count much longer than the actual list. Reading such a
+// varstrlist shouldn't try to allocate more memory than feasible.
+func TestTooLongVarstrList(t *testing.T) {
+       var buf bytes.Buffer
+       WriteVarint31(&buf, math.MaxInt32)
+       WriteVarstr31(&buf, []byte{0x01})
+       WriteVarstr31(&buf, []byte{0x02})
+       WriteVarstr31(&buf, []byte{0x03})
+
+       _, err := ReadVarstrList(NewReader(buf.Bytes()))
+       if err != io.ErrUnexpectedEOF {
+               t.Errorf("got %s, expected io.EOF", err)
+       }
+}
+
// TestExtensibleString round-trips strings through
// WriteExtensibleString/ReadExtensibleString, checking full
// consumption, truncated input, and the unconsumed-suffix path.
func TestExtensibleString(t *testing.T) {
	for i := 0; i < 4; i++ {
		// make a string of length i+1
		str := make([]byte, 0, i+1)
		for j := 0; j <= i; j++ {
			str = append(str, byte(i))
		}
		var buf bytes.Buffer
		_, err := WriteExtensibleString(&buf, nil, func(w io.Writer) error {
			_, err := w.Write(str)
			return err
		})
		if err != nil {
			t.Fatal(err)
		}
		// Consuming the whole payload should leave an empty suffix.
		var str2 []byte
		b := buf.Bytes()
		suffix, err := ReadExtensibleString(NewReader(b), func(r *Reader) error {
			str2, err = ioutil.ReadAll(r)
			return err
		})
		if err != nil {
			t.Fatal(err)
		}
		if len(suffix) > 0 {
			t.Errorf("got suffix %x, want empty suffix", suffix)
		}
		if !bytes.Equal(str, str2) {
			t.Errorf("got %x, want %x", str2, str)
		}
		// Truncated input must produce an EOF-style error.
		_, err = ReadExtensibleString(NewReader(b[:i]), func(r *Reader) error {
			return nil
		})
		switch err {
		case nil:
			t.Errorf("got no error, want io.EOF")
		case io.EOF, io.ErrUnexpectedEOF:
		default:
			t.Errorf("got error %s, want io.EOF", err)
		}
		// Consuming nothing is not an error...
		// NOTE(review): this call is repeated just below with an added
		// suffix check; the two could likely be merged.
		_, err = ReadExtensibleString(NewReader(b), func(r *Reader) error {
			return nil
		})
		if err != nil {
			t.Error(err)
		}
		// ...and the entire payload comes back as the suffix.
		suffix, err = ReadExtensibleString(NewReader(b), func(r *Reader) error {
			return nil
		})
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(str, suffix) {
			t.Errorf("got suffix %x, want %x", suffix, str)
		}
	}
}
+
// TestReadWriteVarint31 property-tests the varint31 round trip over
// random uint32 values; ErrRange is acceptable only above MaxInt32.
func TestReadWriteVarint31(t *testing.T) {
	f := func(x uint32) bool {
		var buf bytes.Buffer
		_, err := WriteVarint31(&buf, uint64(x))
		if err == ErrRange {
			return x > math.MaxInt32
		}
		v, err := ReadVarint31(NewReader(buf.Bytes()))
		return uint32(v) == x && err == nil
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestReadWriteVarint63 property-tests the varint63 round trip over
// random uint64 values; ErrRange is acceptable only above MaxInt64.
func TestReadWriteVarint63(t *testing.T) {
	f := func(x uint64) bool {
		var buf bytes.Buffer
		_, err := WriteVarint63(&buf, x)
		if err == ErrRange {
			return x > math.MaxInt64
		}
		v, err := ReadVarint63(NewReader(buf.Bytes()))
		return v == x && err == nil
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestReadWriteVarstr31 property-tests the varstr31 round trip over
// random byte slices.
func TestReadWriteVarstr31(t *testing.T) {
	f := func(x []byte) bool {
		var buf bytes.Buffer
		_, err := WriteVarstr31(&buf, x)
		if err != nil {
			return false
		}
		got, err := ReadVarstr31(NewReader(buf.Bytes()))
		return bytes.Equal(got, x) && err == nil
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestReadWriteVarstrList property-tests the varstr-list round trip
// over random slices of byte slices.
func TestReadWriteVarstrList(t *testing.T) {
	f := func(x [][]byte) bool {
		var buf bytes.Buffer
		_, err := WriteVarstrList(&buf, x)
		if err != nil {
			return false
		}
		got, err := ReadVarstrList(NewReader(buf.Bytes()))
		return testutil.DeepEqual(got, x) && err == nil
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}
diff --git a/encoding/bufpool/bufpool.go b/encoding/bufpool/bufpool.go
new file mode 100644 (file)
index 0000000..bd59317
--- /dev/null
@@ -0,0 +1,35 @@
+// Package bufpool is a freelist for bytes.Buffer objects.
+package bufpool
+
+import (
+       "bytes"
+       "sync"
+)
+
+// pool is the shared freelist backing Get and Put. Its New function
+// returns an empty *bytes.Buffer so Get never yields nil.
+var pool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// Get returns an initialized bytes.Buffer object.
+// It is like new(bytes.Buffer) except it uses the free list.
+// The caller should call Put when finished with the returned object.
+// Since Buffer.Bytes() returns the buffer's underlying slice,
+// it is not safe for that slice to escape the caller.
+// If the bytes need to escape, CopyBytes should be used.
+func Get() *bytes.Buffer {
+       return pool.Get().(*bytes.Buffer)
+}
+
+// Put resets the buffer and adds it to the freelist.
+func Put(b *bytes.Buffer) {
+       b.Reset()
+       pool.Put(b)
+}
+
+// CopyBytes returns a copy of the bytes contained in the buffer.
+// This slice is safe from updates in the underlying buffer,
+// allowing the buffer to be placed back in the free list.
+func CopyBytes(buf *bytes.Buffer) []byte {
+       b := buf.Bytes()
+       b2 := make([]byte, len(b))
+       copy(b2, b)
+       return b2
+}
diff --git a/encoding/json/duration.go b/encoding/json/duration.go
new file mode 100644 (file)
index 0000000..86452f7
--- /dev/null
@@ -0,0 +1,58 @@
+package json
+
+import (
+       "bytes"
+       "encoding/json"
+       "errors"
+       "strconv"
+       "time"
+)
+
+// Duration wraps time.Duration so it can be encoded to and decoded
+// from JSON as an integer count of milliseconds (see MarshalJSON and
+// UnmarshalJSON below).
+type Duration struct {
+       time.Duration
+}
+
+// UnmarshalJSON fulfills the encoding/json.Unmarshaler interface.
+// It attempts to parse text as a time.Duration string.
+// The Go documentation defines this as a possibly signed sequence of decimal
+// numbers, each with optional fraction and a unit suffix, such as
+// "300ms", "-1.5h" or "2h45m".
+// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+// If there is no time unit, UnmarshalJSON defaults to ms.
+// Negative durations are rejected with an error; a JSON null leaves
+// d unchanged.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+       if bytes.Equal(b, []byte("null")) {
+               return nil
+       }
+
+       dMS, err := strconv.ParseInt(string(b), 10, 64)
+       if err != nil {
+               // Assume this is a string instead, in which case we need to unmarshal it as a string
+               // before we try to parse it as a time.Duration.
+               var str string
+               err = json.Unmarshal(b, &str)
+               if err != nil {
+                       return errors.New("invalid json.Duration")
+               }
+
+               d0, err := time.ParseDuration(str)
+               if err != nil {
+                       return errors.New("invalid json.Duration")
+               }
+               if d0 < 0 {
+                       return errors.New("invalid json.Duration: Duration cannot be less than 0")
+               }
+               d.Duration = d0
+       } else {
+               // A bare JSON integer is interpreted as milliseconds.
+               if dMS < 0 {
+                       return errors.New("invalid json.Duration: Duration cannot be less than 0")
+               }
+               d.Duration = time.Duration(dMS) * time.Millisecond
+       }
+
+       return nil
+}
+
+// MarshalJSON implements json.Marshaler. It encodes the duration as
+// an integer count of milliseconds, matching the bare-number form
+// accepted by UnmarshalJSON. Sub-millisecond precision is truncated.
+func (d Duration) MarshalJSON() ([]byte, error) {
+       return json.Marshal(d.Duration.Nanoseconds() / int64(time.Millisecond))
+}
diff --git a/encoding/json/duration_test.go b/encoding/json/duration_test.go
new file mode 100644 (file)
index 0000000..679bc63
--- /dev/null
@@ -0,0 +1,74 @@
+package json
+
+import (
+       "encoding/json"
+       "strconv"
+       "testing"
+       "time"
+)
+
+// TestUnmarshalDuration exercises Duration.UnmarshalJSON with integer,
+// string, negative, and null inputs.
+func TestUnmarshalDuration(t *testing.T) {
+       successCases := []string{
+               `1000`, // this is an "integer"
+               `"1000ms"`,
+               `"1000000000ns"`,
+               `"1s"`,
+       }
+
+       for _, c := range successCases {
+               var dur Duration
+               err := json.Unmarshal([]byte(c), &dur)
+               if err != nil {
+                       t.Errorf("unexpected error %v", err)
+               }
+
+               var want float64 = 1 // all of our inputs equal 1 second
+               if got := dur.Seconds(); got != want {
+                       t.Errorf("Duration.UnmarshalJSON(%q) = %f want %f", c, got, want)
+               }
+       }
+
+       negativeCases := []string{
+               `-1000`,
+               `"-1000ms"`,
+       }
+
+       for _, c := range negativeCases {
+               var dur Duration
+               wantErr := "invalid json.Duration: Duration cannot be less than 0"
+               err := json.Unmarshal([]byte(c), &dur)
+               // Check for a nil error first so a regression produces a
+               // test failure rather than a panic on err.Error().
+               if err == nil {
+                       t.Errorf("Unmarshal(%q): wanted error %s, got nil", c, wantErr)
+               } else if err.Error() != wantErr {
+                       t.Errorf("wanted error %s, got %s", wantErr, err)
+               }
+       }
+
+       // Null case
+       var dur Duration
+       err := json.Unmarshal([]byte("null"), &dur)
+       if err != nil {
+               t.Errorf("unexpected error %v", err)
+       }
+
+       if dur.Duration != 0 {
+               t.Errorf(`Duration.UnmarshalJSON("null") = %v want 0`, dur.Duration)
+       }
+}
+
+func TestMarshalDuration(t *testing.T) {
+       dur := Duration{
+               Duration: time.Second,
+       }
+       b, err := json.Marshal(dur)
+       if err != nil {
+               t.Errorf("unexpected error %v", err)
+       }
+
+       got, err := strconv.Atoi(string(b))
+       if err != nil {
+               t.Fatal(err)
+       }
+       want := 1000
+       if got != want {
+               t.Errorf("wanted %d, got %d", want, got)
+       }
+}
diff --git a/encoding/json/json.go b/encoding/json/json.go
new file mode 100644 (file)
index 0000000..0ad7b6a
--- /dev/null
@@ -0,0 +1,35 @@
+package json
+
+import (
+       "encoding/hex"
+       "encoding/json"
+)
+
+// HexBytes is a byte slice that marshals to and from text encodings
+// (including JSON strings) as hexadecimal.
+type HexBytes []byte
+
+// MarshalText implements encoding.TextMarshaler by hex-encoding h.
+func (h HexBytes) MarshalText() ([]byte, error) {
+       return []byte(hex.EncodeToString(h)), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler by hex-decoding
+// text into h. On a decode error, *h may be left holding a partially
+// decoded prefix.
+func (h *HexBytes) UnmarshalText(text []byte) error {
+       n := hex.DecodedLen(len(text))
+       *h = make([]byte, n)
+       _, err := hex.Decode(*h, text)
+       return err
+}
+
+// Map holds the raw bytes of a JSON object, stored verbatim but
+// validated on unmarshal.
+type Map []byte
+
+// MarshalJSON returns the stored bytes unmodified.
+func (m Map) MarshalJSON() ([]byte, error) {
+       return m, nil
+}
+
+// UnmarshalJSON stores text after confirming that it parses as a
+// JSON object (string keys mapping to arbitrary values).
+func (m *Map) UnmarshalJSON(text []byte) error {
+       var check map[string]*json.RawMessage
+       err := json.Unmarshal(text, &check)
+       if err != nil {
+               return err
+       }
+       *m = text
+       return nil
+}
diff --git a/errors/doc.go b/errors/doc.go
new file mode 100644 (file)
index 0000000..0437cd4
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+Package errors implements a basic error wrapping pattern, so that errors can be
+annotated with additional information without losing the original error.
+
+Example:
+
+       import "chain/errors"
+
+       func query() error {
+               err := pq.Exec("SELECT...")
+               if err != nil {
+                       return errors.Wrap(err, "select query failed")
+               }
+
+               err = pq.Exec("INSERT...")
+               if err != nil {
+                       return errors.Wrap(err, "insert query failed")
+               }
+
+               return nil
+       }
+
+       func main() {
+               err := query()
+               if _, ok := errors.Root(err).(sql.ErrNoRows); ok {
+                       log.Println("There were no results")
+                       return
+               } else if err != nil {
+                       log.Println(err)
+                       return
+               }
+
+               log.Println("success")
+       }
+
+When to wrap errors
+
+Errors should be wrapped with additional messages when the context is ambiguous.
+This includes when the error could arise in multiple locations in the same
+function, when the error is very common and likely to appear at different points
+in the call tree (e.g., JSON serialization errors), or when you need specific
+parameters alongside the original error message.
+
+Error handling best practices
+
+Errors are part of a function's interface. If you expect the caller to perform
+conditional error handling, you should document the errors returned by your
+function in a function comment, and include it as part of your unit tests.
+
+Be disciplined about validating user input. Programs should draw a very clear
+distinction between user errors and internal errors.
+
+Avoid redundant error logging. If you return an error, assume it will be logged
+higher up the call stack. For a given project, choose an appropriate layer to
+handle error logging.
+*/
+package errors
diff --git a/errors/errors.go b/errors/errors.go
new file mode 100644 (file)
index 0000000..0144287
--- /dev/null
@@ -0,0 +1,186 @@
+package errors
+
+import (
+       "errors"
+       "fmt"
+       "strings"
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+       return errors.New(text)
+}
+
+// wrapperError satisfies the error interface. It carries a context
+// message plus the original (root) error, optional detail strings,
+// key/value data, and the stack trace captured at the first wrap.
+type wrapperError struct {
+       msg    string
+       detail []string
+       data   map[string]interface{}
+       stack  []StackFrame
+       root   error
+}
+
+// Error satisfies the error interface, returning the accumulated
+// context message.
+func (e wrapperError) Error() string {
+       return e.msg
+}
+
+// Root returns the original error that was wrapped by one or more
+// calls to Wrap. If e does not wrap other errors, it will be returned
+// as-is.
+func Root(e error) error {
+       if wErr, ok := e.(wrapperError); ok {
+               return wErr.root
+       }
+       return e
+}
+
+// wrap adds a context message and stack trace to err and returns a new error
+// containing the new context. This function is meant to be composed within
+// other exported functions, such as Wrap and WithDetail.
+// The argument stackSkip is the number of stack frames to ascend when
+// generating stack traces, where 0 is the caller of wrap.
+func wrap(err error, msg string, stackSkip int) error {
+       if err == nil {
+               return nil
+       }
+
+       werr, ok := err.(wrapperError)
+       if !ok {
+               // First wrap of a plain error: record it as the root and
+               // capture the stack trace here; later wraps reuse both.
+               werr.root = err
+               werr.msg = err.Error()
+               werr.stack = getStack(stackSkip+2, stackTraceSize)
+       }
+       if msg != "" {
+               werr.msg = msg + ": " + werr.msg
+       }
+
+       return werr
+}
+
+// Wrap adds a context message and stack trace to err and returns a new error
+// with the new context. Arguments are handled as in fmt.Print.
+// Use Root to recover the original error wrapped by one or more calls to Wrap.
+// Use Stack to recover the stack trace.
+// Wrap returns nil if err is nil.
+func Wrap(err error, a ...interface{}) error {
+       if err == nil {
+               return nil
+       }
+       return wrap(err, fmt.Sprint(a...), 1)
+}
+
+// Wrapf is like Wrap, but arguments are handled as in fmt.Printf.
+func Wrapf(err error, format string, a ...interface{}) error {
+       if err == nil {
+               return nil
+       }
+       return wrap(err, fmt.Sprintf(format, a...), 1)
+}
+
+// WithDetail returns a new error that wraps
+// err as a chain error message containing text
+// as its additional context.
+// Function Detail will return the given text
+// when called on the new error value.
+// WithDetail returns nil if err is nil, and err unchanged if text
+// is empty.
+func WithDetail(err error, text string) error {
+       if err == nil {
+               return nil
+       }
+       if text == "" {
+               return err
+       }
+       e1 := wrap(err, text, 1).(wrapperError)
+       e1.detail = append(e1.detail, text)
+       return e1
+}
+
+// WithDetailf is like WithDetail, except it formats
+// the detail message as in fmt.Printf.
+// Function Detail will return the formatted text
+// when called on the new error value.
+func WithDetailf(err error, format string, v ...interface{}) error {
+       if err == nil {
+               return nil
+       }
+       text := fmt.Sprintf(format, v...)
+       e1 := wrap(err, text, 1).(wrapperError)
+       e1.detail = append(e1.detail, text)
+       return e1
+}
+
+// Detail returns the detail message contained in err, if any.
+// An error has a detail message if it was made by WithDetail
+// or WithDetailf.
+func Detail(err error) string {
+       wrapper, _ := err.(wrapperError)
+       return strings.Join(wrapper.detail, "; ")
+}
+
+// withData returns a new error that wraps err
+// as a chain error message containing v as
+// an extra data item.
+// Calling Data on the returned error yields v.
+// Note that if err already has a data item,
+// it will not be accessible via the returned error value.
+func withData(err error, v map[string]interface{}) error {
+       if err == nil {
+               return nil
+       }
+       e1 := wrap(err, "", 1).(wrapperError)
+       e1.data = v
+       return e1
+}
+
+// WithData returns a new error that wraps err
+// as a chain error message containing a value of type
+// map[string]interface{} as an extra data item.
+// The map contains the values in the map in err,
+// if any, plus the items in keyval.
+// Keyval takes the form
+//   k1, v1, k2, v2, ...
+// Values kN must be strings.
+// WithData panics if keyval has an odd number of elements or if a
+// key is not a string.
+// Calling Data on the returned error yields the map.
+// Note that if err already has a data item of any other type,
+// it will not be accessible via the returned error value.
+func WithData(err error, keyval ...interface{}) error {
+       if err == nil {
+               return nil
+       }
+       // TODO(kr): add vet check for odd-length keyval and non-string keys
+       newkv := make(map[string]interface{})
+       for k, v := range Data(err) {
+               newkv[k] = v
+       }
+       for i := 0; i < len(keyval); i += 2 {
+               newkv[keyval[i].(string)] = keyval[i+1]
+       }
+       return withData(err, newkv)
+}
+
+// Data returns the data item in err, if any.
+func Data(err error) map[string]interface{} {
+       wrapper, _ := err.(wrapperError)
+       return wrapper.data
+}
+
+// Sub returns an error containing root as its root and
+// taking all other metadata (stack trace, detail, message,
+// and data items) from err.
+//
+// Sub returns nil when either root or err is nil.
+//
+// Use this when you need to substitute a new root error in place
+// of an existing error that may already hold a stack trace
+// or other metadata.
+func Sub(root, err error) error {
+       if wrapper, ok := err.(wrapperError); ok && root != nil {
+               // Graft root into a copy of err's wrapper so err's stack,
+               // detail, and data are carried over to the result.
+               wrapper.root = Root(root)
+               wrapper.msg = root.Error()
+               root = wrapper
+       }
+       if err == nil {
+               return nil
+       }
+       // Re-wrap with err's message so the original context is kept
+       // as the outermost prefix.
+       return Wrap(root, err.Error())
+}
diff --git a/errors/errors_test.go b/errors/errors_test.go
new file mode 100644 (file)
index 0000000..2f1b657
--- /dev/null
@@ -0,0 +1,142 @@
+package errors
+
+import (
+       "errors"
+       "reflect"
+       "strings"
+       "testing"
+)
+
+func TestWrap(t *testing.T) {
+       err := errors.New("0")
+       err1 := Wrap(err, "1")
+       err2 := Wrap(err1, "2")
+       err3 := Wrap(err2)
+
+       if got := Root(err1); got != err {
+               t.Fatalf("Root(%v)=%v want %v", err1, got, err)
+       }
+
+       if got := Root(err2); got != err {
+               t.Fatalf("Root(%v)=%v want %v", err2, got, err)
+       }
+
+       if err2.Error() != "2: 1: 0" {
+               t.Fatalf("err msg = %s want '2: 1: 0'", err2.Error())
+       }
+
+       if err3.Error() != "2: 1: 0" {
+               t.Fatalf("err msg = %s want '2: 1: 0'", err3.Error())
+       }
+
+       stack := Stack(err1)
+       if len(stack) == 0 {
+               t.Fatalf("len(stack) = %v want > 0", len(stack))
+       }
+       if !strings.Contains(stack[0].String(), "TestWrap") {
+               t.Fatalf("first stack frame should contain \"TestWrap\": %v", stack[0].String())
+       }
+
+       if !reflect.DeepEqual(Stack(err2), Stack(err1)) {
+               t.Errorf("err2 stack got %v want %v", Stack(err2), Stack(err1))
+       }
+
+       if !reflect.DeepEqual(Stack(err3), Stack(err1)) {
+               t.Errorf("err3 stack got %v want %v", Stack(err3), Stack(err1))
+       }
+}
+
+func TestWrapNil(t *testing.T) {
+       var err error
+
+       err1 := Wrap(err, "1")
+       if err1 != nil {
+               t.Fatal("wrapping nil error should yield nil")
+       }
+}
+
+func TestWrapf(t *testing.T) {
+       err := errors.New("0")
+       err1 := Wrapf(err, "there are %d errors being wrapped", 1)
+       if err1.Error() != "there are 1 errors being wrapped: 0" {
+               t.Fatalf("err msg = %s want 'there are 1 errors being wrapped: 0'", err1.Error())
+       }
+}
+
+func TestWrapMsg(t *testing.T) {
+       err := errors.New("rooti")
+       err1 := Wrap(err, "cherry", " ", "guava")
+       if err1.Error() != "cherry guava: rooti" {
+               t.Fatalf("err msg = %s want 'cherry guava: rooti'", err1.Error())
+       }
+}
+
+func TestDetail(t *testing.T) {
+       root := errors.New("foo")
+       cases := []struct {
+               err     error
+               detail  string
+               message string
+       }{
+               {root, "", "foo"},
+               {WithDetail(root, "bar"), "bar", "bar: foo"},
+               {WithDetail(WithDetail(root, "bar"), "baz"), "bar; baz", "baz: bar: foo"},
+               {Wrap(WithDetail(root, "bar"), "baz"), "bar", "baz: bar: foo"},
+       }
+
+       for _, test := range cases {
+               if got := Detail(test.err); got != test.detail {
+                       t.Errorf("Detail(%v) = %v want %v", test.err, got, test.detail)
+               }
+               if got := Root(test.err); got != root {
+                       t.Errorf("Root(%v) = %v want %v", test.err, got, root)
+               }
+               if got := test.err.Error(); got != test.message {
+                       t.Errorf("(%v).Error() = %v want %v", test.err, got, test.message)
+               }
+       }
+}
+
+func TestData(t *testing.T) {
+       root := errors.New("foo")
+       cases := []struct {
+               err  error
+               data interface{}
+       }{
+               {WithData(root, "a", "b"), map[string]interface{}{"a": "b"}},
+               {WithData(WithData(root, "a", "b"), "c", "d"), map[string]interface{}{"a": "b", "c": "d"}},
+               {Wrap(WithData(root, "a", "b"), "baz"), map[string]interface{}{"a": "b"}},
+       }
+
+       for _, test := range cases {
+               if got := Data(test.err); !reflect.DeepEqual(got, test.data) {
+                       t.Errorf("Data(%#v) = %v want %v", test.err, got, test.data)
+               }
+               if got := Root(test.err); got != root {
+                       t.Errorf("Root(%#v) = %v want %v", test.err, got, root)
+               }
+       }
+}
+
+func TestSub(t *testing.T) {
+       x := errors.New("x")
+       y := errors.New("y")
+       cases := []struct{ new, old, want error }{
+               {nil, nil, nil},
+               {x, nil, nil},
+               {nil, Wrap(y), nil},
+               {Wrap(x), nil, nil},
+               {nil, y, nil},
+               {x, y, errors.New("y: x")},
+               {Wrap(x), y, errors.New("y: x")},
+               {x, Wrap(y), errors.New("y: x")},
+               {Wrap(x, "z"), Wrap(y), errors.New("y: z: x")},
+       }
+
+       for _, test := range cases {
+               got := Sub(test.new, test.old)
+               if !(got == nil && test.want == nil || got.Error() == test.want.Error()) {
+                       t.Errorf("Sub(%#v, %#v) = %v, want %v", test.new, test.old, got, test.want)
+               }
+       }
+}
diff --git a/errors/example_test.go b/errors/example_test.go
new file mode 100644 (file)
index 0000000..67f252b
--- /dev/null
@@ -0,0 +1,21 @@
+package errors_test
+
+import "chain/errors"
+
+var ErrInvalidKey = errors.New("invalid key")
+
+func ExampleSub() {
+       err := sign()
+       if err != nil {
+               err = errors.Sub(ErrInvalidKey, err)
+               return
+       }
+}
+
+func ExampleSub_return() {
+       err := sign()
+       err = errors.Sub(ErrInvalidKey, err)
+       return
+}
+
+func sign() error { return nil }
diff --git a/errors/stack.go b/errors/stack.go
new file mode 100644 (file)
index 0000000..c471c07
--- /dev/null
@@ -0,0 +1,51 @@
+package errors
+
+import (
+       "fmt"
+       "runtime"
+)
+
+const stackTraceSize = 10
+
+// StackFrame represents a single entry in a stack trace.
+type StackFrame struct {
+       Func string
+       File string
+       Line int
+}
+
+// String satisfies the fmt.Stringer interface.
+func (f StackFrame) String() string {
+       return fmt.Sprintf("%s:%d - %s", f.File, f.Line, f.Func)
+}
+
+// Stack returns the stack trace of an error. The error must contain the stack
+// trace, or wrap an error that has a stack trace; otherwise Stack
+// returns nil.
+func Stack(err error) []StackFrame {
+       if wErr, ok := err.(wrapperError); ok {
+               return wErr.stack
+       }
+       return nil
+}
+
+// getStack is a formatting wrapper around runtime.Callers. It returns a stack
+// trace in the form of a StackFrame slice, ascending skip frames above
+// the caller and capturing at most size frames.
+func getStack(skip int, size int) []StackFrame {
+       var (
+               pc    = make([]uintptr, size)
+               calls = runtime.Callers(skip+1, pc)
+               trace []StackFrame
+       )
+
+       for i := 0; i < calls; i++ {
+               f := runtime.FuncForPC(pc[i])
+               if f == nil {
+                       // FuncForPC can return nil for an unknown PC; skip the
+                       // frame rather than dereference a nil *runtime.Func.
+                       continue
+               }
+               // Subtract 1 so the reported line is the call site rather
+               // than the return address.
+               file, line := f.FileLine(pc[i] - 1)
+               trace = append(trace, StackFrame{
+                       Func: f.Name(),
+                       File: file,
+                       Line: line,
+               })
+       }
+
+       return trace
+}
diff --git a/errors/writer.go b/errors/writer.go
new file mode 100644 (file)
index 0000000..fd59b65
--- /dev/null
@@ -0,0 +1,46 @@
+package errors
+
+import "io"
+
+// NewWriter returns a new Writer that writes to w
+// until an error is returned.
+func NewWriter(w io.Writer) *Writer {
+       return &Writer{w: w}
+}
+
+// Writer is an implementation of the
+// "sticky error writer" pattern as described
+// in https://blog.golang.org/errors-are-values.
+//
+// A Writer makes one call
+// on the underlying writer for each call to Write,
+// until an error is returned. From that point on,
+// it makes no calls on the underlying writer,
+// and returns the same error value every time.
+type Writer struct {
+       w   io.Writer
+       n   int64    // total bytes reported written by w
+       err error    // first error from w; sticky once set
+}
+
+// Write makes one call on the underlying writer
+// if no error has previously occurred.
+func (w *Writer) Write(buf []byte) (n int, err error) {
+       if w.err != nil {
+               return 0, w.err
+       }
+       n, w.err = w.w.Write(buf)
+       w.n += int64(n)
+       return n, w.err
+}
+
+// Err returns the first error encountered by Write, if any.
+func (w *Writer) Err() error {
+       return w.err
+}
+
+// Written returns the number of bytes written
+// to the underlying writer.
+func (w *Writer) Written() int64 {
+       return w.n
+}
diff --git a/errors/writer_test.go b/errors/writer_test.go
new file mode 100644 (file)
index 0000000..e8f800b
--- /dev/null
@@ -0,0 +1,51 @@
+package errors
+
+import (
+       "io"
+       "testing"
+)
+
+func TestWriter(t *testing.T) {
+       errX := New("x")
+       tw := testWriter{nil, errX, nil}
+       w := NewWriter(&tw)
+       _, err := w.Write([]byte{1})
+       if err != nil {
+               t.Error("unexpected error", err)
+       }
+       if g := w.Written(); g != 1 {
+               t.Errorf("w.Written() = %d want 1", g)
+       }
+       if len(tw) != 2 {
+               t.Errorf("len(tw) = %d want 2", len(tw))
+       }
+       for i := 0; i < 10; i++ {
+               _, err = w.Write([]byte{1})
+               if err != errX {
+                       t.Errorf("err = %v want %v", err, errX)
+               }
+               if g := w.Written(); g != 2 {
+                       t.Errorf("w.Written() = %d want 2", g)
+               }
+               if len(tw) != 1 {
+                       t.Errorf("len(tw) = %d want 1", len(tw))
+               }
+       }
+       if got := w.Err(); got != errX {
+               t.Errorf("w.Err() = %v want %v", got, errX)
+       }
+}
+
+// testWriter returns its errors in order.
+// elements of a testWriter may be nil.
+// if its len is 0, it returns io.EOF.
+type testWriter []error
+
+func (tw *testWriter) Write(p []byte) (int, error) {
+       if len(*tw) == 0 {
+               return len(p), io.EOF
+       }
+       err := (*tw)[0]
+       *tw = (*tw)[1:]
+       return len(p), err
+}
diff --git a/protocol/bc/asset.go b/protocol/bc/asset.go
new file mode 100644 (file)
index 0000000..97496f6
--- /dev/null
@@ -0,0 +1,78 @@
+package bc
+
+import (
+       "database/sql/driver"
+       "errors"
+       "io"
+
+       "chain/crypto/sha3pool"
+       "chain/encoding/blockchain"
+)
+
+// AssetID is the Hash256 of the asset definition.
+
+// NewAssetID converts a raw 32-byte array into an AssetID.
+func NewAssetID(b [32]byte) (a AssetID) {
+       return AssetID(NewHash(b))
+}
+
+// The following methods delegate to the underlying Hash representation
+// so AssetID supports the same text/JSON encodings, SQL value/scan,
+// and binary serialization as Hash.
+func (a AssetID) Byte32() (b32 [32]byte)               { return Hash(a).Byte32() }
+func (a AssetID) MarshalText() ([]byte, error)         { return Hash(a).MarshalText() }
+func (a *AssetID) UnmarshalText(b []byte) error        { return (*Hash)(a).UnmarshalText(b) }
+func (a *AssetID) UnmarshalJSON(b []byte) error        { return (*Hash)(a).UnmarshalJSON(b) }
+func (a AssetID) Bytes() []byte                        { return Hash(a).Bytes() }
+func (a AssetID) Value() (driver.Value, error)         { return Hash(a).Value() }
+func (a *AssetID) Scan(val interface{}) error          { return (*Hash)(a).Scan(val) }
+func (a AssetID) WriteTo(w io.Writer) (int64, error)   { return Hash(a).WriteTo(w) }
+func (a *AssetID) ReadFrom(r io.Reader) (int64, error) { return (*Hash)(a).ReadFrom(r) }
+func (a *AssetID) IsZero() bool                        { return (*Hash)(a).IsZero() }
+
+// ComputeAssetID returns the asset ID of ad: the SHA3-256 digest of
+// ad's serialized form, computed with a pooled hash state.
+func (ad *AssetDefinition) ComputeAssetID() (assetID AssetID) {
+       h := sha3pool.Get256()
+       defer sha3pool.Put256(h)
+       writeForHash(h, *ad) // error is impossible
+       var b [32]byte
+       h.Read(b[:]) // error is impossible
+       return NewAssetID(b)
+}
+
+// ComputeAssetID assembles an AssetDefinition from the issuance
+// program (prog, running under vmVersion), the initial block ID, and
+// the definition data hash, and returns its derived asset ID.
+func ComputeAssetID(prog []byte, initialBlockID *Hash, vmVersion uint64, data *Hash) AssetID {
+       def := &AssetDefinition{
+               InitialBlockId: initialBlockID,
+               IssuanceProgram: &Program{
+                       VmVersion: vmVersion,
+                       Code:      prog,
+               },
+               Data: data,
+       }
+       return def.ComputeAssetID()
+}
+
+// ReadFrom deserializes an AssetAmount from r: the asset ID followed
+// by the amount as a varint63.
+func (a *AssetAmount) ReadFrom(r *blockchain.Reader) error {
+       var assetID AssetID
+       _, err := assetID.ReadFrom(r)
+       if err != nil {
+               return err
+       }
+       a.AssetId = &assetID
+       a.Amount, err = blockchain.ReadVarint63(r)
+       return err
+}
+
+// WriteTo serializes a to w in the form ReadFrom reads (asset ID,
+// then amount as a varint63) and returns the number of bytes written.
+func (a AssetAmount) WriteTo(w io.Writer) (int64, error) {
+       n, err := a.AssetId.WriteTo(w)
+       if err != nil {
+               return n, err
+       }
+       n2, err := blockchain.WriteVarint63(w, a.Amount)
+       return n + int64(n2), err
+}
+
+// Equal reports whether a and other have the same asset ID and
+// amount. It returns an error when either AssetAmount or either
+// asset ID pointer is nil.
+func (a *AssetAmount) Equal(other *AssetAmount) (eq bool, err error) {
+       if a == nil || other == nil {
+               return false, errors.New("empty asset amount")
+       }
+       if a.AssetId == nil || other.AssetId == nil {
+               return false, errors.New("empty asset id")
+       }
+       return a.Amount == other.Amount && *a.AssetId == *other.AssetId, nil
+}
diff --git a/protocol/bc/asset_test.go b/protocol/bc/asset_test.go
new file mode 100644 (file)
index 0000000..c5b6c4a
--- /dev/null
@@ -0,0 +1,45 @@
+package bc
+
+import (
+       "testing"
+
+       "golang.org/x/crypto/sha3"
+)
+
+func TestComputeAssetID(t *testing.T) {
+       issuanceScript := []byte{1}
+       initialBlockHash := mustDecodeHash("dd506f5d4c3f904d3d4b3c3be597c9198c6193ffd14a28570e4a923ce40cf9e5")
+       assetID := ComputeAssetID(issuanceScript, &initialBlockHash, 1, &EmptyStringHash)
+
+       unhashed := append([]byte{}, initialBlockHash.Bytes()...)
+       unhashed = append(unhashed, 0x01) // vmVersion
+       unhashed = append(unhashed, 0x01) // length of issuanceScript
+       unhashed = append(unhashed, issuanceScript...)
+       unhashed = append(unhashed, EmptyStringHash.Bytes()...)
+       want := NewAssetID(sha3.Sum256(unhashed))
+
+       if assetID != want {
+               t.Errorf("asset id = %x want %x", assetID.Bytes(), want.Bytes())
+       }
+}
+
+var assetIDSink AssetID
+
+func BenchmarkComputeAssetID(b *testing.B) {
+       var (
+               initialBlockHash Hash
+               issuanceScript   = []byte{5}
+       )
+
+       for i := 0; i < b.N; i++ {
+               assetIDSink = ComputeAssetID(issuanceScript, &initialBlockHash, 1, &EmptyStringHash)
+       }
+}
+
+// mustDecodeHash parses a hex string into a Hash, panicking on
+// malformed input. For use with test fixtures only.
+func mustDecodeHash(s string) (h Hash) {
+       err := h.UnmarshalText([]byte(s))
+       if err != nil {
+               panic(err)
+       }
+       return h
+}
diff --git a/protocol/bc/bc.pb.go b/protocol/bc/bc.pb.go
new file mode 100644 (file)
index 0000000..3119892
--- /dev/null
@@ -0,0 +1,854 @@
+// Code generated by protoc-gen-go.
+// source: bc.proto
+// DO NOT EDIT!
+
+/*
+Package bc is a generated protocol buffer package.
+
+It is generated from these files:
+       bc.proto
+
+It has these top-level messages:
+       Hash
+       Program
+       AssetID
+       AssetAmount
+       AssetDefinition
+       ValueSource
+       ValueDestination
+       BlockHeader
+       TxHeader
+       Mux
+       Nonce
+       Output
+       Retirement
+       TimeRange
+       Issuance
+       Spend
+*/
+package bc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Hash struct {
+       V0 uint64 `protobuf:"fixed64,1,opt,name=v0" json:"v0,omitempty"`
+       V1 uint64 `protobuf:"fixed64,2,opt,name=v1" json:"v1,omitempty"`
+       V2 uint64 `protobuf:"fixed64,3,opt,name=v2" json:"v2,omitempty"`
+       V3 uint64 `protobuf:"fixed64,4,opt,name=v3" json:"v3,omitempty"`
+}
+
+func (m *Hash) Reset()                    { *m = Hash{} }
+func (m *Hash) String() string            { return proto.CompactTextString(m) }
+func (*Hash) ProtoMessage()               {}
+func (*Hash) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Hash) GetV0() uint64 {
+       if m != nil {
+               return m.V0
+       }
+       return 0
+}
+
+func (m *Hash) GetV1() uint64 {
+       if m != nil {
+               return m.V1
+       }
+       return 0
+}
+
+func (m *Hash) GetV2() uint64 {
+       if m != nil {
+               return m.V2
+       }
+       return 0
+}
+
+func (m *Hash) GetV3() uint64 {
+       if m != nil {
+               return m.V3
+       }
+       return 0
+}
+
+type Program struct {
+       VmVersion uint64 `protobuf:"varint,1,opt,name=vm_version,json=vmVersion" json:"vm_version,omitempty"`
+       Code      []byte `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"`
+}
+
+func (m *Program) Reset()                    { *m = Program{} }
+func (m *Program) String() string            { return proto.CompactTextString(m) }
+func (*Program) ProtoMessage()               {}
+func (*Program) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Program) GetVmVersion() uint64 {
+       if m != nil {
+               return m.VmVersion
+       }
+       return 0
+}
+
+func (m *Program) GetCode() []byte {
+       if m != nil {
+               return m.Code
+       }
+       return nil
+}
+
+// This message type duplicates Hash, above. One alternative is to
+// embed a Hash inside an AssetID. But it's useful for AssetID to be
+// plain old data (without pointers). Another alternative is use Hash
+// in any protobuf types where an AssetID is called for, but it's
+// preferable to have type safety.
+type AssetID struct {
+       V0 uint64 `protobuf:"fixed64,1,opt,name=v0" json:"v0,omitempty"`
+       V1 uint64 `protobuf:"fixed64,2,opt,name=v1" json:"v1,omitempty"`
+       V2 uint64 `protobuf:"fixed64,3,opt,name=v2" json:"v2,omitempty"`
+       V3 uint64 `protobuf:"fixed64,4,opt,name=v3" json:"v3,omitempty"`
+}
+
+func (m *AssetID) Reset()                    { *m = AssetID{} }
+func (m *AssetID) String() string            { return proto.CompactTextString(m) }
+func (*AssetID) ProtoMessage()               {}
+func (*AssetID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *AssetID) GetV0() uint64 {
+       if m != nil {
+               return m.V0
+       }
+       return 0
+}
+
+func (m *AssetID) GetV1() uint64 {
+       if m != nil {
+               return m.V1
+       }
+       return 0
+}
+
+func (m *AssetID) GetV2() uint64 {
+       if m != nil {
+               return m.V2
+       }
+       return 0
+}
+
+func (m *AssetID) GetV3() uint64 {
+       if m != nil {
+               return m.V3
+       }
+       return 0
+}
+
+type AssetAmount struct {
+       AssetId *AssetID `protobuf:"bytes,1,opt,name=asset_id,json=assetId" json:"asset_id,omitempty"`
+       Amount  uint64   `protobuf:"varint,2,opt,name=amount" json:"amount,omitempty"`
+}
+
+func (m *AssetAmount) Reset()                    { *m = AssetAmount{} }
+func (m *AssetAmount) String() string            { return proto.CompactTextString(m) }
+func (*AssetAmount) ProtoMessage()               {}
+func (*AssetAmount) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *AssetAmount) GetAssetId() *AssetID {
+       if m != nil {
+               return m.AssetId
+       }
+       return nil
+}
+
+func (m *AssetAmount) GetAmount() uint64 {
+       if m != nil {
+               return m.Amount
+       }
+       return 0
+}
+
+type AssetDefinition struct {
+       InitialBlockId  *Hash    `protobuf:"bytes,1,opt,name=initial_block_id,json=initialBlockId" json:"initial_block_id,omitempty"`
+       IssuanceProgram *Program `protobuf:"bytes,2,opt,name=issuance_program,json=issuanceProgram" json:"issuance_program,omitempty"`
+       Data            *Hash    `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *AssetDefinition) Reset()                    { *m = AssetDefinition{} }
+func (m *AssetDefinition) String() string            { return proto.CompactTextString(m) }
+func (*AssetDefinition) ProtoMessage()               {}
+func (*AssetDefinition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *AssetDefinition) GetInitialBlockId() *Hash {
+       if m != nil {
+               return m.InitialBlockId
+       }
+       return nil
+}
+
+func (m *AssetDefinition) GetIssuanceProgram() *Program {
+       if m != nil {
+               return m.IssuanceProgram
+       }
+       return nil
+}
+
+func (m *AssetDefinition) GetData() *Hash {
+       if m != nil {
+               return m.Data
+       }
+       return nil
+}
+
+type ValueSource struct {
+       Ref      *Hash        `protobuf:"bytes,1,opt,name=ref" json:"ref,omitempty"`
+       Value    *AssetAmount `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+       Position uint64       `protobuf:"varint,3,opt,name=position" json:"position,omitempty"`
+}
+
+func (m *ValueSource) Reset()                    { *m = ValueSource{} }
+func (m *ValueSource) String() string            { return proto.CompactTextString(m) }
+func (*ValueSource) ProtoMessage()               {}
+func (*ValueSource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *ValueSource) GetRef() *Hash {
+       if m != nil {
+               return m.Ref
+       }
+       return nil
+}
+
+func (m *ValueSource) GetValue() *AssetAmount {
+       if m != nil {
+               return m.Value
+       }
+       return nil
+}
+
+func (m *ValueSource) GetPosition() uint64 {
+       if m != nil {
+               return m.Position
+       }
+       return 0
+}
+
+type ValueDestination struct {
+       Ref      *Hash        `protobuf:"bytes,1,opt,name=ref" json:"ref,omitempty"`
+       Value    *AssetAmount `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+       Position uint64       `protobuf:"varint,3,opt,name=position" json:"position,omitempty"`
+}
+
+func (m *ValueDestination) Reset()                    { *m = ValueDestination{} }
+func (m *ValueDestination) String() string            { return proto.CompactTextString(m) }
+func (*ValueDestination) ProtoMessage()               {}
+func (*ValueDestination) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *ValueDestination) GetRef() *Hash {
+       if m != nil {
+               return m.Ref
+       }
+       return nil
+}
+
+func (m *ValueDestination) GetValue() *AssetAmount {
+       if m != nil {
+               return m.Value
+       }
+       return nil
+}
+
+func (m *ValueDestination) GetPosition() uint64 {
+       if m != nil {
+               return m.Position
+       }
+       return 0
+}
+
+type BlockHeader struct {
+       Version              uint64   `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+       Height               uint64   `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
+       PreviousBlockId      *Hash    `protobuf:"bytes,3,opt,name=previous_block_id,json=previousBlockId" json:"previous_block_id,omitempty"`
+       TimestampMs          uint64   `protobuf:"varint,4,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+       TransactionsRoot     *Hash    `protobuf:"bytes,5,opt,name=transactions_root,json=transactionsRoot" json:"transactions_root,omitempty"`
+       AssetsRoot           *Hash    `protobuf:"bytes,6,opt,name=assets_root,json=assetsRoot" json:"assets_root,omitempty"`
+       NextConsensusProgram []byte   `protobuf:"bytes,7,opt,name=next_consensus_program,json=nextConsensusProgram,proto3" json:"next_consensus_program,omitempty"`
+       ExtHash              *Hash    `protobuf:"bytes,8,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       WitnessArguments     [][]byte `protobuf:"bytes,9,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+}
+
+func (m *BlockHeader) Reset()                    { *m = BlockHeader{} }
+func (m *BlockHeader) String() string            { return proto.CompactTextString(m) }
+func (*BlockHeader) ProtoMessage()               {}
+func (*BlockHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *BlockHeader) GetVersion() uint64 {
+       if m != nil {
+               return m.Version
+       }
+       return 0
+}
+
+func (m *BlockHeader) GetHeight() uint64 {
+       if m != nil {
+               return m.Height
+       }
+       return 0
+}
+
+func (m *BlockHeader) GetPreviousBlockId() *Hash {
+       if m != nil {
+               return m.PreviousBlockId
+       }
+       return nil
+}
+
+func (m *BlockHeader) GetTimestampMs() uint64 {
+       if m != nil {
+               return m.TimestampMs
+       }
+       return 0
+}
+
+func (m *BlockHeader) GetTransactionsRoot() *Hash {
+       if m != nil {
+               return m.TransactionsRoot
+       }
+       return nil
+}
+
+func (m *BlockHeader) GetAssetsRoot() *Hash {
+       if m != nil {
+               return m.AssetsRoot
+       }
+       return nil
+}
+
+func (m *BlockHeader) GetNextConsensusProgram() []byte {
+       if m != nil {
+               return m.NextConsensusProgram
+       }
+       return nil
+}
+
+func (m *BlockHeader) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *BlockHeader) GetWitnessArguments() [][]byte {
+       if m != nil {
+               return m.WitnessArguments
+       }
+       return nil
+}
+
+type TxHeader struct {
+       Version   uint64  `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+       ResultIds []*Hash `protobuf:"bytes,2,rep,name=result_ids,json=resultIds" json:"result_ids,omitempty"`
+       Data      *Hash   `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+       MinTimeMs uint64  `protobuf:"varint,4,opt,name=min_time_ms,json=minTimeMs" json:"min_time_ms,omitempty"`
+       MaxTimeMs uint64  `protobuf:"varint,5,opt,name=max_time_ms,json=maxTimeMs" json:"max_time_ms,omitempty"`
+       ExtHash   *Hash   `protobuf:"bytes,6,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+}
+
+func (m *TxHeader) Reset()                    { *m = TxHeader{} }
+func (m *TxHeader) String() string            { return proto.CompactTextString(m) }
+func (*TxHeader) ProtoMessage()               {}
+func (*TxHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *TxHeader) GetVersion() uint64 {
+       if m != nil {
+               return m.Version
+       }
+       return 0
+}
+
+func (m *TxHeader) GetResultIds() []*Hash {
+       if m != nil {
+               return m.ResultIds
+       }
+       return nil
+}
+
+func (m *TxHeader) GetData() *Hash {
+       if m != nil {
+               return m.Data
+       }
+       return nil
+}
+
+func (m *TxHeader) GetMinTimeMs() uint64 {
+       if m != nil {
+               return m.MinTimeMs
+       }
+       return 0
+}
+
+func (m *TxHeader) GetMaxTimeMs() uint64 {
+       if m != nil {
+               return m.MaxTimeMs
+       }
+       return 0
+}
+
+func (m *TxHeader) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+type Mux struct {
+       Sources             []*ValueSource      `protobuf:"bytes,1,rep,name=sources" json:"sources,omitempty"`
+       Program             *Program            `protobuf:"bytes,2,opt,name=program" json:"program,omitempty"`
+       ExtHash             *Hash               `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       WitnessDestinations []*ValueDestination `protobuf:"bytes,4,rep,name=witness_destinations,json=witnessDestinations" json:"witness_destinations,omitempty"`
+       WitnessArguments    [][]byte            `protobuf:"bytes,5,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+}
+
+func (m *Mux) Reset()                    { *m = Mux{} }
+func (m *Mux) String() string            { return proto.CompactTextString(m) }
+func (*Mux) ProtoMessage()               {}
+func (*Mux) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *Mux) GetSources() []*ValueSource {
+       if m != nil {
+               return m.Sources
+       }
+       return nil
+}
+
+func (m *Mux) GetProgram() *Program {
+       if m != nil {
+               return m.Program
+       }
+       return nil
+}
+
+func (m *Mux) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *Mux) GetWitnessDestinations() []*ValueDestination {
+       if m != nil {
+               return m.WitnessDestinations
+       }
+       return nil
+}
+
+func (m *Mux) GetWitnessArguments() [][]byte {
+       if m != nil {
+               return m.WitnessArguments
+       }
+       return nil
+}
+
+type Nonce struct {
+       Program           *Program `protobuf:"bytes,1,opt,name=program" json:"program,omitempty"`
+       TimeRangeId       *Hash    `protobuf:"bytes,2,opt,name=time_range_id,json=timeRangeId" json:"time_range_id,omitempty"`
+       ExtHash           *Hash    `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       WitnessArguments  [][]byte `protobuf:"bytes,4,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+       WitnessAnchoredId *Hash    `protobuf:"bytes,5,opt,name=witness_anchored_id,json=witnessAnchoredId" json:"witness_anchored_id,omitempty"`
+}
+
+func (m *Nonce) Reset()                    { *m = Nonce{} }
+func (m *Nonce) String() string            { return proto.CompactTextString(m) }
+func (*Nonce) ProtoMessage()               {}
+func (*Nonce) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *Nonce) GetProgram() *Program {
+       if m != nil {
+               return m.Program
+       }
+       return nil
+}
+
+func (m *Nonce) GetTimeRangeId() *Hash {
+       if m != nil {
+               return m.TimeRangeId
+       }
+       return nil
+}
+
+func (m *Nonce) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *Nonce) GetWitnessArguments() [][]byte {
+       if m != nil {
+               return m.WitnessArguments
+       }
+       return nil
+}
+
+func (m *Nonce) GetWitnessAnchoredId() *Hash {
+       if m != nil {
+               return m.WitnessAnchoredId
+       }
+       return nil
+}
+
+type Output struct {
+       Source         *ValueSource `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"`
+       ControlProgram *Program     `protobuf:"bytes,2,opt,name=control_program,json=controlProgram" json:"control_program,omitempty"`
+       Data           *Hash        `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+       ExtHash        *Hash        `protobuf:"bytes,4,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       Ordinal        uint64       `protobuf:"varint,5,opt,name=ordinal" json:"ordinal,omitempty"`
+}
+
+func (m *Output) Reset()                    { *m = Output{} }
+func (m *Output) String() string            { return proto.CompactTextString(m) }
+func (*Output) ProtoMessage()               {}
+func (*Output) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *Output) GetSource() *ValueSource {
+       if m != nil {
+               return m.Source
+       }
+       return nil
+}
+
+func (m *Output) GetControlProgram() *Program {
+       if m != nil {
+               return m.ControlProgram
+       }
+       return nil
+}
+
+func (m *Output) GetData() *Hash {
+       if m != nil {
+               return m.Data
+       }
+       return nil
+}
+
+func (m *Output) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *Output) GetOrdinal() uint64 {
+       if m != nil {
+               return m.Ordinal
+       }
+       return 0
+}
+
+type Retirement struct {
+       Source  *ValueSource `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"`
+       Data    *Hash        `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+       ExtHash *Hash        `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       Ordinal uint64       `protobuf:"varint,4,opt,name=ordinal" json:"ordinal,omitempty"`
+}
+
+func (m *Retirement) Reset()                    { *m = Retirement{} }
+func (m *Retirement) String() string            { return proto.CompactTextString(m) }
+func (*Retirement) ProtoMessage()               {}
+func (*Retirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *Retirement) GetSource() *ValueSource {
+       if m != nil {
+               return m.Source
+       }
+       return nil
+}
+
+func (m *Retirement) GetData() *Hash {
+       if m != nil {
+               return m.Data
+       }
+       return nil
+}
+
+func (m *Retirement) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *Retirement) GetOrdinal() uint64 {
+       if m != nil {
+               return m.Ordinal
+       }
+       return 0
+}
+
+type TimeRange struct {
+       MinTimeMs uint64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs" json:"min_time_ms,omitempty"`
+       MaxTimeMs uint64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs" json:"max_time_ms,omitempty"`
+       ExtHash   *Hash  `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+}
+
+func (m *TimeRange) Reset()                    { *m = TimeRange{} }
+func (m *TimeRange) String() string            { return proto.CompactTextString(m) }
+func (*TimeRange) ProtoMessage()               {}
+func (*TimeRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *TimeRange) GetMinTimeMs() uint64 {
+       if m != nil {
+               return m.MinTimeMs
+       }
+       return 0
+}
+
+func (m *TimeRange) GetMaxTimeMs() uint64 {
+       if m != nil {
+               return m.MaxTimeMs
+       }
+       return 0
+}
+
+func (m *TimeRange) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+type Issuance struct {
+       AnchorId               *Hash             `protobuf:"bytes,1,opt,name=anchor_id,json=anchorId" json:"anchor_id,omitempty"`
+       Value                  *AssetAmount      `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+       Data                   *Hash             `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+       ExtHash                *Hash             `protobuf:"bytes,4,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       WitnessDestination     *ValueDestination `protobuf:"bytes,5,opt,name=witness_destination,json=witnessDestination" json:"witness_destination,omitempty"`
+       WitnessAssetDefinition *AssetDefinition  `protobuf:"bytes,6,opt,name=witness_asset_definition,json=witnessAssetDefinition" json:"witness_asset_definition,omitempty"`
+       WitnessArguments       [][]byte          `protobuf:"bytes,7,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+       WitnessAnchoredId      *Hash             `protobuf:"bytes,8,opt,name=witness_anchored_id,json=witnessAnchoredId" json:"witness_anchored_id,omitempty"`
+       Ordinal                uint64            `protobuf:"varint,9,opt,name=ordinal" json:"ordinal,omitempty"`
+}
+
+func (m *Issuance) Reset()                    { *m = Issuance{} }
+func (m *Issuance) String() string            { return proto.CompactTextString(m) }
+func (*Issuance) ProtoMessage()               {}
+func (*Issuance) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *Issuance) GetAnchorId() *Hash {
+       if m != nil {
+               return m.AnchorId
+       }
+       return nil
+}
+
+func (m *Issuance) GetValue() *AssetAmount {
+       if m != nil {
+               return m.Value
+       }
+       return nil
+}
+
+func (m *Issuance) GetData() *Hash {
+       if m != nil {
+               return m.Data
+       }
+       return nil
+}
+
+func (m *Issuance) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *Issuance) GetWitnessDestination() *ValueDestination {
+       if m != nil {
+               return m.WitnessDestination
+       }
+       return nil
+}
+
+func (m *Issuance) GetWitnessAssetDefinition() *AssetDefinition {
+       if m != nil {
+               return m.WitnessAssetDefinition
+       }
+       return nil
+}
+
+func (m *Issuance) GetWitnessArguments() [][]byte {
+       if m != nil {
+               return m.WitnessArguments
+       }
+       return nil
+}
+
+func (m *Issuance) GetWitnessAnchoredId() *Hash {
+       if m != nil {
+               return m.WitnessAnchoredId
+       }
+       return nil
+}
+
+func (m *Issuance) GetOrdinal() uint64 {
+       if m != nil {
+               return m.Ordinal
+       }
+       return 0
+}
+
+type Spend struct {
+       SpentOutputId      *Hash             `protobuf:"bytes,1,opt,name=spent_output_id,json=spentOutputId" json:"spent_output_id,omitempty"`
+       Data               *Hash             `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+       ExtHash            *Hash             `protobuf:"bytes,3,opt,name=ext_hash,json=extHash" json:"ext_hash,omitempty"`
+       WitnessDestination *ValueDestination `protobuf:"bytes,4,opt,name=witness_destination,json=witnessDestination" json:"witness_destination,omitempty"`
+       WitnessArguments   [][]byte          `protobuf:"bytes,5,rep,name=witness_arguments,json=witnessArguments,proto3" json:"witness_arguments,omitempty"`
+       WitnessAnchoredId  *Hash             `protobuf:"bytes,6,opt,name=witness_anchored_id,json=witnessAnchoredId" json:"witness_anchored_id,omitempty"`
+       Ordinal            uint64            `protobuf:"varint,7,opt,name=ordinal" json:"ordinal,omitempty"`
+}
+
+func (m *Spend) Reset()                    { *m = Spend{} }
+func (m *Spend) String() string            { return proto.CompactTextString(m) }
+func (*Spend) ProtoMessage()               {}
+func (*Spend) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *Spend) GetSpentOutputId() *Hash {
+       if m != nil {
+               return m.SpentOutputId
+       }
+       return nil
+}
+
+func (m *Spend) GetData() *Hash {
+       if m != nil {
+               return m.Data
+       }
+       return nil
+}
+
+func (m *Spend) GetExtHash() *Hash {
+       if m != nil {
+               return m.ExtHash
+       }
+       return nil
+}
+
+func (m *Spend) GetWitnessDestination() *ValueDestination {
+       if m != nil {
+               return m.WitnessDestination
+       }
+       return nil
+}
+
+func (m *Spend) GetWitnessArguments() [][]byte {
+       if m != nil {
+               return m.WitnessArguments
+       }
+       return nil
+}
+
+func (m *Spend) GetWitnessAnchoredId() *Hash {
+       if m != nil {
+               return m.WitnessAnchoredId
+       }
+       return nil
+}
+
+func (m *Spend) GetOrdinal() uint64 {
+       if m != nil {
+               return m.Ordinal
+       }
+       return 0
+}
+
+func init() {
+       proto.RegisterType((*Hash)(nil), "bc.Hash")
+       proto.RegisterType((*Program)(nil), "bc.Program")
+       proto.RegisterType((*AssetID)(nil), "bc.AssetID")
+       proto.RegisterType((*AssetAmount)(nil), "bc.AssetAmount")
+       proto.RegisterType((*AssetDefinition)(nil), "bc.AssetDefinition")
+       proto.RegisterType((*ValueSource)(nil), "bc.ValueSource")
+       proto.RegisterType((*ValueDestination)(nil), "bc.ValueDestination")
+       proto.RegisterType((*BlockHeader)(nil), "bc.BlockHeader")
+       proto.RegisterType((*TxHeader)(nil), "bc.TxHeader")
+       proto.RegisterType((*Mux)(nil), "bc.Mux")
+       proto.RegisterType((*Nonce)(nil), "bc.Nonce")
+       proto.RegisterType((*Output)(nil), "bc.Output")
+       proto.RegisterType((*Retirement)(nil), "bc.Retirement")
+       proto.RegisterType((*TimeRange)(nil), "bc.TimeRange")
+       proto.RegisterType((*Issuance)(nil), "bc.Issuance")
+       proto.RegisterType((*Spend)(nil), "bc.Spend")
+}
+
+func init() { proto.RegisterFile("bc.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+       // 957 bytes of a gzipped FileDescriptorProto
+       0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdf, 0x6e, 0xe3, 0xc4,
+       0x17, 0x96, 0x13, 0x27, 0x76, 0x4e, 0xba, 0x4d, 0x3a, 0xad, 0x2a, 0x6b, 0xf5, 0xfb, 0xa1, 0x62,
+       0x54, 0x76, 0x57, 0xa0, 0xaa, 0xdb, 0x16, 0xc4, 0x05, 0x37, 0x85, 0x02, 0xeb, 0x8b, 0x00, 0xf2,
+       0x56, 0x7b, 0x6b, 0x4d, 0xec, 0xd9, 0xc6, 0x22, 0x9e, 0x09, 0x9e, 0x71, 0xc8, 0x35, 0x8f, 0xc0,
+       0x2d, 0x6f, 0xc1, 0x33, 0xec, 0x03, 0xf0, 0x18, 0x5c, 0xf3, 0x04, 0x68, 0x8e, 0xc7, 0xce, 0x9f,
+       0x26, 0x69, 0x2a, 0x96, 0xbb, 0x9c, 0x39, 0x67, 0xce, 0x9f, 0xef, 0x7c, 0x9f, 0x33, 0xe0, 0x0e,
+       0xe3, 0xb3, 0x49, 0x2e, 0x94, 0x20, 0x8d, 0x61, 0xec, 0x7f, 0x0b, 0xf6, 0x2b, 0x2a, 0x47, 0x64,
+       0x1f, 0x1a, 0xd3, 0x73, 0xcf, 0x3a, 0xb1, 0x9e, 0xb7, 0xc3, 0xc6, 0xf4, 0x1c, 0xed, 0x97, 0x5e,
+       0xc3, 0xd8, 0x2f, 0xd1, 0xbe, 0xf0, 0x9a, 0xc6, 0xbe, 0x40, 0xfb, 0xd2, 0xb3, 0x8d, 0x7d, 0xe9,
+       0x7f, 0x09, 0xce, 0x8f, 0xb9, 0xb8, 0xcb, 0x69, 0x46, 0xfe, 0x0f, 0x30, 0xcd, 0xa2, 0x29, 0xcb,
+       0x65, 0x2a, 0x38, 0xa6, 0xb4, 0xc3, 0xce, 0x34, 0x7b, 0x53, 0x1e, 0x10, 0x02, 0x76, 0x2c, 0x12,
+       0x86, 0xb9, 0xf7, 0x42, 0xfc, 0xed, 0x07, 0xe0, 0x5c, 0x4b, 0xc9, 0x54, 0x70, 0xf3, 0xaf, 0x1b,
+       0x19, 0x40, 0x17, 0x53, 0x5d, 0x67, 0xa2, 0xe0, 0x8a, 0x7c, 0x0c, 0x2e, 0xd5, 0x66, 0x94, 0x26,
+       0x98, 0xb4, 0x7b, 0xd1, 0x3d, 0x1b, 0xc6, 0x67, 0xa6, 0x5a, 0xe8, 0xa0, 0x33, 0x48, 0xc8, 0x31,
+       0xb4, 0x29, 0xde, 0xc0, 0x52, 0x76, 0x68, 0x2c, 0xff, 0x77, 0x0b, 0x7a, 0x18, 0x7c, 0xc3, 0xde,
+       0xa6, 0x3c, 0x55, 0x7a, 0x82, 0x0b, 0xe8, 0xe3, 0x4f, 0x3a, 0x8e, 0x86, 0x63, 0x11, 0xff, 0x34,
+       0xcf, 0xed, 0xea, 0xdc, 0x1a, 0xcf, 0x70, 0xdf, 0x44, 0x7c, 0xa5, 0x03, 0x82, 0x84, 0x7c, 0x0e,
+       0xfd, 0x54, 0xca, 0x82, 0xf2, 0x98, 0x45, 0x93, 0x12, 0x28, 0xac, 0x64, 0xfa, 0x31, 0xd8, 0x85,
+       0xbd, 0x2a, 0xa8, 0x02, 0xf3, 0x7f, 0x60, 0x27, 0x54, 0x51, 0x1c, 0x78, 0x31, 0x3f, 0x9e, 0xfa,
+       0x63, 0xe8, 0xbe, 0xa1, 0xe3, 0x82, 0xbd, 0x16, 0x45, 0x1e, 0x33, 0xf2, 0x14, 0x9a, 0x39, 0x7b,
+       0x7b, 0xaf, 0x17, 0x7d, 0x48, 0x4e, 0xa1, 0x35, 0xd5, 0xa1, 0xa6, 0x6a, 0xaf, 0x46, 0xa1, 0x04,
+       0x2a, 0x2c, 0xbd, 0xe4, 0x29, 0xb8, 0x13, 0x21, 0x71, 0x4e, 0xac, 0x69, 0x87, 0xb5, 0xed, 0xff,
+       0x0c, 0x7d, 0xac, 0x76, 0xc3, 0xa4, 0x4a, 0x39, 0x45, 0x2c, 0xfe, 0xe3, 0x92, 0xbf, 0x36, 0xa1,
+       0x8b, 0x10, 0xbe, 0x62, 0x34, 0x61, 0x39, 0xf1, 0xc0, 0x59, 0x26, 0x56, 0x65, 0xea, 0x05, 0x8e,
+       0x58, 0x7a, 0x37, 0xaa, 0x17, 0x58, 0x5a, 0xe4, 0x0a, 0x0e, 0x26, 0x39, 0x9b, 0xa6, 0xa2, 0x90,
+       0xf3, 0x6d, 0xad, 0xa2, 0xd9, 0xab, 0x42, 0xaa, 0x75, 0x7d, 0x08, 0x7b, 0x2a, 0xcd, 0x98, 0x54,
+       0x34, 0x9b, 0x44, 0x99, 0x44, 0x7e, 0xd9, 0x61, 0xb7, 0x3e, 0x1b, 0x48, 0xf2, 0x19, 0x1c, 0xa8,
+       0x9c, 0x72, 0x49, 0x63, 0xdd, 0xa9, 0x8c, 0x72, 0x21, 0x94, 0xd7, 0x5a, 0x49, 0xdc, 0x5f, 0x0c,
+       0x09, 0x85, 0x50, 0xe4, 0x05, 0x74, 0x91, 0x73, 0xe6, 0x42, 0x7b, 0xe5, 0x02, 0x94, 0x4e, 0x0c,
+       0xbd, 0x82, 0x63, 0xce, 0x66, 0x2a, 0x8a, 0x05, 0x97, 0x8c, 0xcb, 0x42, 0xd6, 0xcc, 0x71, 0x50,
+       0x3b, 0x47, 0xda, 0xfb, 0x75, 0xe5, 0xac, 0x18, 0xf3, 0x11, 0xb8, 0xfa, 0xd2, 0x88, 0xca, 0x91,
+       0xe7, 0xae, 0x64, 0x77, 0xd8, 0x4c, 0xa1, 0xdc, 0x3f, 0x81, 0x83, 0x5f, 0x52, 0xc5, 0x99, 0x94,
+       0x11, 0xcd, 0xef, 0x8a, 0x8c, 0x71, 0x25, 0xbd, 0xce, 0x49, 0xf3, 0xf9, 0x5e, 0xd8, 0x37, 0x8e,
+       0xeb, 0xea, 0xdc, 0xff, 0xd3, 0x02, 0xf7, 0x76, 0xf6, 0xe0, 0x06, 0x9e, 0x01, 0xe4, 0x4c, 0x16,
+       0x63, 0xad, 0x35, 0xe9, 0x35, 0x4e, 0x9a, 0x4b, 0xa5, 0x3b, 0xa5, 0x2f, 0x48, 0xe4, 0x76, 0x4e,
+       0x93, 0x0f, 0xa0, 0x9b, 0xa5, 0x3c, 0xd2, 0x50, 0xcf, 0x91, 0xef, 0x64, 0x29, 0xbf, 0x4d, 0x33,
+       0x36, 0x90, 0xe8, 0xa7, 0xb3, 0xda, 0xdf, 0x32, 0x7e, 0x3a, 0x33, 0xfe, 0xc5, 0xf9, 0xdb, 0x1b,
+       0xe6, 0xf7, 0xff, 0xb6, 0xa0, 0x39, 0x28, 0x66, 0xe4, 0x05, 0x38, 0x12, 0xb5, 0x23, 0x3d, 0x0b,
+       0x1b, 0x46, 0x92, 0x2e, 0x68, 0x2a, 0xac, 0xfc, 0xe4, 0x14, 0x9c, 0x2d, 0xc2, 0xad, 0x7c, 0x4b,
+       0xe5, 0x9b, 0x9b, 0xe0, 0xff, 0x0e, 0x8e, 0x2a, 0xf8, 0x93, 0xb9, 0x98, 0xf4, 0xb0, 0xba, 0x87,
+       0xa3, 0xba, 0x87, 0x05, 0xa5, 0x85, 0x87, 0xe6, 0xc6, 0xc2, 0x99, 0x5c, 0xbf, 0xc7, 0xd6, 0x86,
+       0x3d, 0xfe, 0x65, 0x41, 0xeb, 0x7b, 0xc1, 0x63, 0xb6, 0x38, 0x8b, 0xb5, 0x65, 0x96, 0x4f, 0xe1,
+       0x09, 0xc2, 0x9c, 0x53, 0x7e, 0xc7, 0xb4, 0x6e, 0x1a, 0x2b, 0x03, 0xa1, 0x20, 0x42, 0xed, 0x0d,
+       0x92, 0xdd, 0x26, 0x5f, 0xdb, 0xb0, 0xbd, 0xbe, 0x61, 0xf2, 0x05, 0x1c, 0xd6, 0xc1, 0x3c, 0x1e,
+       0x89, 0x9c, 0x25, 0xba, 0x8b, 0x55, 0x91, 0x55, 0x19, 0xaf, 0x4d, 0x4c, 0x90, 0xf8, 0xef, 0x2c,
+       0x68, 0xff, 0x50, 0xa8, 0x49, 0xa1, 0xc8, 0x33, 0x68, 0x97, 0x2b, 0x34, 0xa3, 0xde, 0xdb, 0xb0,
+       0x71, 0x93, 0x2b, 0xe8, 0xc5, 0x82, 0xab, 0x5c, 0x8c, 0xb7, 0x7d, 0xa1, 0xf7, 0x4d, 0xcc, 0x4e,
+       0x1f, 0xe8, 0x25, 0x4c, 0xec, 0x4d, 0x98, 0x78, 0xe0, 0x88, 0x3c, 0x49, 0x39, 0x1d, 0x1b, 0x36,
+       0x57, 0xa6, 0xff, 0x9b, 0x05, 0x10, 0x32, 0x95, 0xe6, 0x4c, 0x03, 0xb2, 0xfb, 0x28, 0x55, 0x53,
+       0x8d, 0x07, 0x9b, 0x6a, 0xee, 0xd0, 0x94, 0xbd, 0xdc, 0xd4, 0x04, 0x3a, 0xb7, 0xd5, 0xda, 0x57,
+       0xd5, 0x6a, 0x3d, 0xa0, 0xd6, 0xc6, 0x36, 0xb5, 0x6e, 0xea, 0xc5, 0xff, 0xa3, 0x09, 0x6e, 0x60,
+       0xfe, 0x18, 0xc9, 0x29, 0x74, 0x4a, 0x32, 0xac, 0xfb, 0xdb, 0x75, 0x4b, 0x57, 0x90, 0xec, 0xfa,
+       0xe7, 0xf3, 0x1e, 0xd6, 0xf7, 0xcd, 0x9c, 0xa5, 0x0b, 0x62, 0x36, 0x2c, 0x5d, 0xaf, 0x65, 0x72,
+       0x5f, 0xcb, 0x64, 0x00, 0x5e, 0x4d, 0x76, 0x7c, 0xb1, 0x24, 0xf5, 0x8b, 0xc3, 0x7c, 0xc7, 0x0e,
+       0xeb, 0x19, 0xe6, 0x8f, 0x91, 0xf0, 0xb8, 0x22, 0xff, 0xca, 0x23, 0x65, 0xad, 0xd0, 0x9c, 0xc7,
+       0x09, 0xcd, 0x7d, 0x50, 0x68, 0x8b, 0x34, 0xe9, 0x2c, 0xd3, 0xe4, 0x5d, 0x03, 0x5a, 0xaf, 0x27,
+       0x8c, 0x27, 0xe4, 0x1c, 0x7a, 0x72, 0xc2, 0xb8, 0x8a, 0x04, 0x2a, 0x72, 0xdd, 0xde, 0x9e, 0x60,
+       0x40, 0xa9, 0xd8, 0x20, 0x79, 0x1f, 0xfc, 0xdd, 0xb0, 0x15, 0xfb, 0x91, 0x5b, 0x79, 0xcc, 0x07,
+       0x76, 0x13, 0x8c, 0xed, 0x47, 0xc1, 0xe8, 0x2c, 0xc1, 0x38, 0x6c, 0xe3, 0x5b, 0xfd, 0xf2, 0x9f,
+       0x00, 0x00, 0x00, 0xff, 0xff, 0x58, 0x32, 0x78, 0x25, 0xb7, 0x0b, 0x00, 0x00,
+}
diff --git a/protocol/bc/bc.proto b/protocol/bc/bc.proto
new file mode 100644 (file)
index 0000000..0c61588
--- /dev/null
@@ -0,0 +1,131 @@
+syntax = "proto3";
+
+package bc;
+
+message Hash {
+  fixed64 v0 = 1;
+  fixed64 v1 = 2;
+  fixed64 v2 = 3;
+  fixed64 v3 = 4;
+}
+
+message Program {
+  uint64 vm_version = 1;
+  bytes  code       = 2;
+}
+
+// This message type duplicates Hash, above. One alternative is to
+// embed a Hash inside an AssetID. But it's useful for AssetID to be
+// plain old data (without pointers). Another alternative is use Hash
+// in any protobuf types where an AssetID is called for, but it's
+// preferable to have type safety.
+message AssetID {
+  fixed64 v0 = 1;
+  fixed64 v1 = 2;
+  fixed64 v2 = 3;
+  fixed64 v3 = 4;
+}
+
+message AssetAmount {
+  AssetID asset_id = 1;
+  uint64  amount   = 2;
+}
+
+message AssetDefinition {
+  Hash    initial_block_id = 1;
+  Program issuance_program = 2;
+  Hash    data             = 3;
+}
+
+message ValueSource {
+  Hash        ref      = 1;
+  AssetAmount value    = 2;
+  uint64      position = 3;
+}
+
+message ValueDestination {
+  Hash        ref      = 1;
+  AssetAmount value    = 2;
+  uint64      position = 3;
+}
+
+message BlockHeader {
+  uint64 version                   = 1;
+  uint64 height                    = 2;
+  Hash   previous_block_id         = 3;
+  uint64 timestamp_ms              = 4;
+  Hash   transactions_root         = 5;
+  Hash   assets_root               = 6;
+  bytes  next_consensus_program    = 7;
+  Hash   ext_hash                  = 8;
+
+  repeated bytes witness_arguments = 9;
+}
+
+message TxHeader {
+  uint64        version     = 1;
+  repeated Hash result_ids  = 2;
+  Hash          data        = 3;
+  uint64        min_time_ms = 4;
+  uint64        max_time_ms = 5;
+  Hash          ext_hash    = 6;
+}
+
+message Mux {
+  repeated ValueSource      sources              = 1; // issuances, spends, and muxes
+  Program                   program              = 2;
+  Hash                      ext_hash             = 3;
+  repeated ValueDestination witness_destinations = 4; // outputs, retirements, and muxes
+  repeated bytes            witness_arguments    = 5;
+}
+
+message Nonce {
+  Program        program             = 1;
+  Hash           time_range_id       = 2;
+  Hash           ext_hash            = 3;
+  repeated bytes witness_arguments   = 4;
+  Hash           witness_anchored_id = 5;
+}
+
+message Output {
+  ValueSource source          = 1;
+  Program     control_program = 2;
+  Hash        data            = 3;
+  Hash        ext_hash        = 4;
+  uint64      ordinal         = 5;
+}
+
+message Retirement {
+  ValueSource source   = 1;
+  Hash        data     = 2;
+  Hash        ext_hash = 3;
+  uint64      ordinal  = 4;
+}
+
+message TimeRange {
+  uint64    min_time_ms = 1;
+  uint64    max_time_ms = 2;
+  Hash      ext_hash    = 3;
+}
+
+message Issuance {
+  Hash             anchor_id                = 1;
+  AssetAmount      value                    = 2;
+  Hash             data                     = 3;
+  Hash             ext_hash                 = 4;
+  ValueDestination witness_destination      = 5;
+  AssetDefinition  witness_asset_definition = 6;
+  repeated bytes   witness_arguments        = 7;
+  Hash             witness_anchored_id      = 8;
+  uint64           ordinal                  = 9;
+}
+
+message Spend {
+  Hash             spent_output_id     = 1;
+  Hash             data                = 2;
+  Hash             ext_hash            = 3;
+  ValueDestination witness_destination = 4;
+  repeated bytes   witness_arguments   = 5;
+  Hash             witness_anchored_id = 6;
+  uint64           ordinal             = 7;
+}
diff --git a/protocol/bc/bctest/tx.go b/protocol/bc/bctest/tx.go
new file mode 100644 (file)
index 0000000..80bb2f7
--- /dev/null
@@ -0,0 +1,82 @@
+// Package bctest provides utilities for constructing blockchain data
+// structures.
+package bctest
+
+import (
+       "crypto/rand"
+       "testing"
+       "time"
+
+       "golang.org/x/crypto/sha3"
+
+       "chain/crypto/ed25519/chainkd"
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/vm"
+       "chain/protocol/vm/vmutil"
+       "chain/testutil"
+)
+
+// NewIssuanceTx creates a new signed, issuance transaction issuing 100 units
+// of a new asset to a garbage control program. The resulting transaction has
+// one input and one output.
+//
+// The asset issued is created from randomly-generated keys. The resulting
+// transaction is finalized (signed with a TXSIGHASH commitment).
+func NewIssuanceTx(tb testing.TB, initial bc.Hash, opts ...func(*legacy.Tx)) *legacy.Tx {
+       // Generate a random key pair for the asset being issued.
+       xprv, xpub, err := chainkd.NewXKeys(nil)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       pubkeys := chainkd.XPubKeys([]chainkd.XPub{xpub})
+
+       // Create a corresponding issuance program.
+       sigProg, err := vmutil.P2SPMultiSigProgram(pubkeys, 1)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       builder := vmutil.NewBuilder()
+       builder.AddRawBytes(sigProg)
+       issuanceProgram, _ := builder.Build()
+
+       // Create a transaction issuing this new asset.
+       var nonce [8]byte
+       _, err = rand.Read(nonce[:])
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       assetdef := []byte(`{"type": "prottest issuance"}`)
+       txin := legacy.NewIssuanceInput(nonce[:], 100, nil, initial, issuanceProgram, nil, assetdef)
+
+       tx := legacy.NewTx(legacy.TxData{
+               Version: 1,
+               MinTime: bc.Millis(time.Now().Add(-5 * time.Minute)),
+               MaxTime: bc.Millis(time.Now().Add(5 * time.Minute)),
+               Inputs:  []*legacy.TxInput{txin},
+               Outputs: []*legacy.TxOutput{
+                       legacy.NewTxOutput(txin.AssetID(), 100, []byte{0xbe, 0xef}, nil),
+               },
+       })
+
+       for _, opt := range opts {
+               opt(tx)
+       }
+
+       // Sign with a simple TXSIGHASH signature.
+       builder = vmutil.NewBuilder()
+       h := tx.SigHash(0)
+       builder.AddData(h.Bytes())
+       builder.AddOp(vm.OP_TXSIGHASH).AddOp(vm.OP_EQUAL)
+       sigprog, _ := builder.Build()
+       sigproghash := sha3.Sum256(sigprog)
+       signature := xprv.Sign(sigproghash[:])
+
+       var witness [][]byte
+       witness = append(witness, vm.Int64Bytes(0)) // 0 args to the sigprog
+       witness = append(witness, signature)
+       witness = append(witness, sigprog)
+       tx.SetInputArguments(0, witness)
+
+       return tx
+}
diff --git a/protocol/bc/block.go b/protocol/bc/block.go
new file mode 100644 (file)
index 0000000..830a4e3
--- /dev/null
@@ -0,0 +1,7 @@
+package bc
+
+type Block struct {
+       *BlockHeader
+       ID           Hash
+       Transactions []*Tx
+}
diff --git a/protocol/bc/blockheader.go b/protocol/bc/blockheader.go
new file mode 100644 (file)
index 0000000..4e76347
--- /dev/null
@@ -0,0 +1,32 @@
+package bc
+
+import "io"
+
+// BlockHeader contains the header information for a blockchain
+// block. It satisfies the Entry interface.
+
+func (BlockHeader) typ() string { return "blockheader" }
+func (bh *BlockHeader) writeForHash(w io.Writer) {
+       mustWriteForHash(w, bh.Version)
+       mustWriteForHash(w, bh.Height)
+       mustWriteForHash(w, bh.PreviousBlockId)
+       mustWriteForHash(w, bh.TimestampMs)
+       mustWriteForHash(w, bh.TransactionsRoot)
+       mustWriteForHash(w, bh.AssetsRoot)
+       mustWriteForHash(w, bh.NextConsensusProgram)
+       mustWriteForHash(w, bh.ExtHash)
+}
+
+// NewBlockHeader creates a new BlockHeader and populates
+// its body.
+func NewBlockHeader(version, height uint64, previousBlockID *Hash, timestampMS uint64, transactionsRoot, assetsRoot *Hash, nextConsensusProgram []byte) *BlockHeader {
+       return &BlockHeader{
+               Version:              version,
+               Height:               height,
+               PreviousBlockId:      previousBlockID,
+               TimestampMs:          timestampMS,
+               TransactionsRoot:     transactionsRoot,
+               AssetsRoot:           assetsRoot,
+               NextConsensusProgram: nextConsensusProgram,
+       }
+}
diff --git a/protocol/bc/doc.go b/protocol/bc/doc.go
new file mode 100644 (file)
index 0000000..cd47809
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+Package bc provides the fundamental blockchain data structures used in
+the Chain Protocol.
+
+This package is in transition from a set of "old" data structures
+(TxData, TxInput, TxOutput, etc.) to a new data model based on
+"entries," each with a specific type (such as spend, issuance, output,
+etc.), and each with its own distinct hash. The hash of a designated
+"header" entry serves as the hash of the entire transaction. The
+rationale for this change is that it is considerably more extensible,
+and it allows future scripting tools to traverse and access
+transaction data by making all components hash-addressable.
+
+Hashing and validation (of the old types) are redefined to mean
+"convert to the new data structures and hash/validate that."
+
+Soon the old structures will be retired entirely.
+
+These changes will be made in a compatible way; in particular, block
+and transaction hashes will not change.
+*/
+package bc
diff --git a/protocol/bc/entry.go b/protocol/bc/entry.go
new file mode 100644 (file)
index 0000000..a7d809d
--- /dev/null
@@ -0,0 +1,166 @@
+package bc
+
+import (
+       "fmt"
+       "io"
+       "reflect"
+
+       "github.com/golang/protobuf/proto"
+
+       "chain/crypto/sha3pool"
+       "chain/encoding/blockchain"
+       "chain/errors"
+)
+
+// Entry is the interface implemented by each addressable unit in a
+// blockchain: transaction components such as spends, issuances,
+// outputs, and retirements (among others), plus blockheaders.
+type Entry interface {
+       proto.Message
+
+       // type produces a short human-readable string uniquely identifying
+       // the type of this entry.
+       typ() string
+
+       // writeForHash writes the entry's body for hashing.
+       writeForHash(w io.Writer)
+}
+
+var errInvalidValue = errors.New("invalid value")
+
+// EntryID computes the identifier of an entry, as the hash of its
+// body plus some metadata.
+func EntryID(e Entry) (hash Hash) {
+       if e == nil {
+               return hash
+       }
+
+       // Nil pointer; not the same as nil interface above. (See
+       // https://golang.org/doc/faq#nil_error.)
+       if v := reflect.ValueOf(e); v.Kind() == reflect.Ptr && v.IsNil() {
+               return hash
+       }
+
+       hasher := sha3pool.Get256()
+       defer sha3pool.Put256(hasher)
+
+       hasher.Write([]byte("entryid:"))
+       hasher.Write([]byte(e.typ()))
+       hasher.Write([]byte{':'})
+
+       bh := sha3pool.Get256()
+       defer sha3pool.Put256(bh)
+
+       e.writeForHash(bh)
+
+       var innerHash [32]byte
+       bh.Read(innerHash[:])
+
+       hasher.Write(innerHash[:])
+
+       hash.ReadFrom(hasher)
+       return hash
+}
+
+var byte32zero [32]byte
+
+// mustWriteForHash serializes the object c to the writer w, from which
+// presumably a hash can be extracted.
+//
+// This function may panic with an error from the underlying writer,
+// and may produce errors of its own if passed objects whose
+// hash-serialization formats are not specified. It MUST NOT produce
+// errors in other cases.
+func mustWriteForHash(w io.Writer, c interface{}) {
+       err := writeForHash(w, c)
+       if err != nil {
+               panic(err)
+       }
+}
+
+func writeForHash(w io.Writer, c interface{}) error {
+       switch v := c.(type) {
+       case byte:
+               _, err := w.Write([]byte{v})
+               return errors.Wrap(err, "writing byte for hash")
+       case uint64:
+               _, err := blockchain.WriteVarint63(w, v)
+               return errors.Wrapf(err, "writing uint64 (%d) for hash", v)
+       case []byte:
+               _, err := blockchain.WriteVarstr31(w, v)
+               return errors.Wrapf(err, "writing []byte (len %d) for hash", len(v))
+       case [][]byte:
+               _, err := blockchain.WriteVarstrList(w, v)
+               return errors.Wrapf(err, "writing [][]byte (len %d) for hash", len(v))
+       case string:
+               _, err := blockchain.WriteVarstr31(w, []byte(v))
+               return errors.Wrapf(err, "writing string (len %d) for hash", len(v))
+       case *Hash:
+               if v == nil {
+                       _, err := w.Write(byte32zero[:])
+                       return errors.Wrap(err, "writing nil *Hash for hash")
+               }
+               _, err := w.Write(v.Bytes())
+               return errors.Wrap(err, "writing *Hash for hash")
+       case *AssetID:
+               if v == nil {
+                       _, err := w.Write(byte32zero[:])
+                       return errors.Wrap(err, "writing nil *AssetID for hash")
+               }
+               _, err := w.Write(v.Bytes())
+               return errors.Wrap(err, "writing *AssetID for hash")
+       case Hash:
+               _, err := v.WriteTo(w)
+               return errors.Wrap(err, "writing Hash for hash")
+       case AssetID:
+               _, err := v.WriteTo(w)
+               return errors.Wrap(err, "writing AssetID for hash")
+       }
+
+       // The two container types in the spec (List and Struct)
+       // correspond to slices and structs in Go. They can't be
+       // handled with type assertions, so we must use reflect.
+       switch v := reflect.ValueOf(c); v.Kind() {
+       case reflect.Ptr:
+               if v.IsNil() {
+                       return nil
+               }
+               elem := v.Elem()
+               return writeForHash(w, elem.Interface())
+       case reflect.Slice:
+               l := v.Len()
+               _, err := blockchain.WriteVarint31(w, uint64(l))
+               if err != nil {
+                       return errors.Wrapf(err, "writing slice (len %d) for hash", l)
+               }
+               for i := 0; i < l; i++ {
+                       c := v.Index(i)
+                       if !c.CanInterface() {
+                               return errInvalidValue
+                       }
+                       err := writeForHash(w, c.Interface())
+                       if err != nil {
+                               return errors.Wrapf(err, "writing slice element %d for hash", i)
+                       }
+               }
+               return nil
+
+       case reflect.Struct:
+               typ := v.Type()
+               for i := 0; i < typ.NumField(); i++ {
+                       c := v.Field(i)
+                       if !c.CanInterface() {
+                               return errInvalidValue
+                       }
+                       err := writeForHash(w, c.Interface())
+                       if err != nil {
+                               t := v.Type()
+                               f := t.Field(i)
+                               return errors.Wrapf(err, "writing struct field %d (%s.%s) for hash", i, t.Name(), f.Name)
+                       }
+               }
+               return nil
+       }
+
+       return errors.Wrap(fmt.Errorf("bad type %T", c))
+}
diff --git a/protocol/bc/entry_test.go b/protocol/bc/entry_test.go
new file mode 100644 (file)
index 0000000..51b41a0
--- /dev/null
@@ -0,0 +1,30 @@
+package bc
+
+import (
+       "reflect"
+       "testing"
+       "time"
+)
+
+func BenchmarkEntryID(b *testing.B) {
+       m := NewMux([]*ValueSource{{Position: 1}}, &Program{Code: []byte{1}, VmVersion: 1})
+
+       entries := []Entry{
+               NewIssuance(nil, &AssetAmount{}, &Hash{}, 0),
+               NewTxHeader(1, nil, &Hash{}, uint64(time.Now().Unix()), uint64(time.Now().Unix())),
+               m,
+               NewNonce(&Program{Code: []byte{1}, VmVersion: 1}, nil),
+               NewOutput(&ValueSource{}, &Program{Code: []byte{1}, VmVersion: 1}, &Hash{}, 0),
+               NewRetirement(&ValueSource{}, &Hash{}, 1),
+               NewSpend(&Hash{}, &Hash{}, 0),
+       }
+
+       for _, e := range entries {
+               name := reflect.TypeOf(e).Elem().Name()
+               b.Run(name, func(b *testing.B) {
+                       for i := 0; i < b.N; i++ {
+                               EntryID(e)
+                       }
+               })
+       }
+}
diff --git a/protocol/bc/gen.go b/protocol/bc/gen.go
new file mode 100644 (file)
index 0000000..a6f3df3
--- /dev/null
@@ -0,0 +1,3 @@
+package bc
+
+//go:generate protoc --go_out=. bc.proto
diff --git a/protocol/bc/hash.go b/protocol/bc/hash.go
new file mode 100644 (file)
index 0000000..c2df967
--- /dev/null
@@ -0,0 +1,120 @@
+package bc
+
+import (
+       "bytes"
+       "database/sql/driver"
+       "encoding/binary"
+       "encoding/hex"
+       "encoding/json"
+       "fmt"
+       "io"
+
+       "golang.org/x/crypto/sha3"
+)
+
+// Hash represents a 256-bit hash.
+
+var EmptyStringHash = NewHash(sha3.Sum256(nil))
+
+func NewHash(b32 [32]byte) (h Hash) {
+       h.V0 = binary.BigEndian.Uint64(b32[0:8])
+       h.V1 = binary.BigEndian.Uint64(b32[8:16])
+       h.V2 = binary.BigEndian.Uint64(b32[16:24])
+       h.V3 = binary.BigEndian.Uint64(b32[24:32])
+       return h
+}
+
+func (h Hash) Byte32() (b32 [32]byte) {
+       binary.BigEndian.PutUint64(b32[0:8], h.V0)
+       binary.BigEndian.PutUint64(b32[8:16], h.V1)
+       binary.BigEndian.PutUint64(b32[16:24], h.V2)
+       binary.BigEndian.PutUint64(b32[24:32], h.V3)
+       return b32
+}
+
+// MarshalText satisfies the TextMarshaler interface.
+// It returns the bytes of h encoded in hex,
+// for formats that can't hold arbitrary binary data.
+// It never returns an error.
+func (h Hash) MarshalText() ([]byte, error) {
+       b := h.Byte32()
+       v := make([]byte, 64)
+       hex.Encode(v, b[:])
+       return v, nil
+}
+
+// UnmarshalText satisfies the TextUnmarshaler interface.
+// It decodes hex data from b into h.
+func (h *Hash) UnmarshalText(v []byte) error {
+       var b [32]byte
+       if len(v) != 64 {
+               return fmt.Errorf("bad length hash string %d", len(v))
+       }
+       _, err := hex.Decode(b[:], v)
+       *h = NewHash(b)
+       return err
+}
+
+// UnmarshalJSON satisfies the json.Unmarshaler interface.
+// If b is a JSON-encoded null, it copies the zero-value into h. Otherwise, it
+// decodes hex data from b into h.
+func (h *Hash) UnmarshalJSON(b []byte) error {
+       if bytes.Equal(b, []byte("null")) {
+               *h = Hash{}
+               return nil
+       }
+       var s string
+       err := json.Unmarshal(b, &s)
+       if err != nil {
+               return err
+       }
+       return h.UnmarshalText([]byte(s))
+}
+
+func (h Hash) Bytes() []byte {
+       b32 := h.Byte32()
+       return b32[:]
+}
+
+// Value satisfies the driver.Valuer interface
+func (h Hash) Value() (driver.Value, error) {
+       return h.Bytes(), nil
+}
+
+// Scan satisfies the driver.Scanner interface
+func (h *Hash) Scan(v interface{}) error {
+       var buf [32]byte
+       b, ok := v.([]byte)
+       if !ok {
+               return fmt.Errorf("Hash.Scan received unsupported type %T", v)
+       }
+       copy(buf[:], b)
+       *h = NewHash(buf)
+       return nil
+}
+
+// WriteTo satisfies the io.WriterTo interface.
+func (h Hash) WriteTo(w io.Writer) (int64, error) {
+       n, err := w.Write(h.Bytes())
+       return int64(n), err
+}
+
+// ReadFrom satisfies the io.ReaderFrom interface.
+func (h *Hash) ReadFrom(r io.Reader) (int64, error) {
+       var b32 [32]byte
+       n, err := io.ReadFull(r, b32[:])
+       if err != nil {
+               return int64(n), err
+       }
+       *h = NewHash(b32)
+       return int64(n), nil
+}
+
+// IsZero tells whether a Hash pointer is nil or points to an all-zero
+// hash.
+func (h *Hash) IsZero() bool {
+       if h == nil {
+               return true
+       }
+       return *h == Hash{}
+}
diff --git a/protocol/bc/issuance.go b/protocol/bc/issuance.go
new file mode 100644 (file)
index 0000000..97f06b3
--- /dev/null
@@ -0,0 +1,34 @@
+package bc
+
+import "io"
+
+// Issuance is a source of new value on a blockchain. It satisfies the
+// Entry interface.
+//
+// (Not to be confused with the deprecated type IssuanceInput.)
+
+func (Issuance) typ() string { return "issuance1" }
+func (iss *Issuance) writeForHash(w io.Writer) {
+       mustWriteForHash(w, iss.AnchorId)
+       mustWriteForHash(w, iss.Value)
+       mustWriteForHash(w, iss.Data)
+       mustWriteForHash(w, iss.ExtHash)
+}
+
+func (iss *Issuance) SetDestination(id *Hash, val *AssetAmount, pos uint64) {
+       iss.WitnessDestination = &ValueDestination{
+               Ref:      id,
+               Value:    val,
+               Position: pos,
+       }
+}
+
+// NewIssuance creates a new Issuance.
+func NewIssuance(anchorID *Hash, value *AssetAmount, data *Hash, ordinal uint64) *Issuance {
+       return &Issuance{
+               AnchorId: anchorID,
+               Value:    value,
+               Data:     data,
+               Ordinal:  ordinal,
+       }
+}
diff --git a/protocol/bc/legacy/bc_test.go b/protocol/bc/legacy/bc_test.go
new file mode 100644 (file)
index 0000000..a69875c
--- /dev/null
@@ -0,0 +1,26 @@
+package legacy
+
+import (
+       "bytes"
+       "io"
+       "testing"
+
+       "chain/protocol/bc"
+)
+
+func serialize(t *testing.T, wt io.WriterTo) []byte {
+       var b bytes.Buffer
+       _, err := wt.WriteTo(&b)
+       if err != nil {
+               t.Fatal(err)
+       }
+       return b.Bytes()
+}
+
+func mustDecodeHash(s string) (h bc.Hash) {
+       err := h.UnmarshalText([]byte(s))
+       if err != nil {
+               panic(err)
+       }
+       return h
+}
diff --git a/protocol/bc/legacy/block.go b/protocol/bc/legacy/block.go
new file mode 100644 (file)
index 0000000..57ce185
--- /dev/null
@@ -0,0 +1,135 @@
+package legacy
+
+import (
+       "bytes"
+       "database/sql/driver"
+       "encoding/hex"
+       "fmt"
+       "io"
+
+       "chain/encoding/blockchain"
+       "chain/encoding/bufpool"
+       "chain/errors"
+)
+
+const (
+       SerBlockWitness      = 1
+       SerBlockTransactions = 2
+
+       SerBlockSigHash = 0
+       SerBlockHeader  = SerBlockWitness
+       SerBlockFull    = SerBlockWitness | SerBlockTransactions
+)
+
+// Block describes a complete block, including its header
+// and the transactions it contains.
+type Block struct {
+       BlockHeader
+       Transactions []*Tx
+}
+
+// MarshalText fulfills the json.Marshaler interface.
+// This guarantees that blocks will get deserialized correctly
+// when being parsed from HTTP requests.
+func (b *Block) MarshalText() ([]byte, error) {
+       buf := bufpool.Get()
+       defer bufpool.Put(buf)
+       _, err := b.WriteTo(buf)
+       if err != nil {
+               return nil, err
+       }
+
+       enc := make([]byte, hex.EncodedLen(buf.Len()))
+       hex.Encode(enc, buf.Bytes())
+       return enc, nil
+}
+
+// UnmarshalText fulfills the encoding.TextUnmarshaler interface.
+func (b *Block) UnmarshalText(text []byte) error {
+       decoded := make([]byte, hex.DecodedLen(len(text)))
+       _, err := hex.Decode(decoded, text)
+       if err != nil {
+               return err
+       }
+
+       r := blockchain.NewReader(decoded)
+       err = b.readFrom(r)
+       if err != nil {
+               return err
+       }
+       if trailing := r.Len(); trailing > 0 {
+               return fmt.Errorf("trailing garbage (%d bytes)", trailing)
+       }
+       return nil
+}
+
+// Scan fulfills the sql.Scanner interface.
+func (b *Block) Scan(val interface{}) error {
+       driverBuf, ok := val.([]byte)
+       if !ok {
+               return errors.New("Scan must receive a byte slice")
+       }
+       buf := make([]byte, len(driverBuf))
+       copy(buf[:], driverBuf)
+       r := blockchain.NewReader(buf)
+       err := b.readFrom(r)
+       if err != nil {
+               return err
+       }
+       if trailing := r.Len(); trailing > 0 {
+               return fmt.Errorf("trailing garbage (%d bytes)", trailing)
+       }
+       return nil
+}
+
+// Value fulfills the sql.driver.Valuer interface.
+func (b *Block) Value() (driver.Value, error) {
+       buf := new(bytes.Buffer)
+       _, err := b.WriteTo(buf)
+       if err != nil {
+               return nil, err
+       }
+       return buf.Bytes(), nil
+}
+
+func (b *Block) readFrom(r *blockchain.Reader) error {
+       serflags, err := b.BlockHeader.readFrom(r)
+       if err != nil {
+               return err
+       }
+       if serflags&SerBlockTransactions == SerBlockTransactions {
+               n, err := blockchain.ReadVarint31(r)
+               if err != nil {
+                       return errors.Wrap(err, "reading number of transactions")
+               }
+               for ; n > 0; n-- {
+                       var data TxData
+                       err = data.readFrom(r)
+                       if err != nil {
+                               return errors.Wrapf(err, "reading transaction %d", len(b.Transactions))
+                       }
+                       // TODO(kr): store/reload hashes;
+                       // don't compute here if not necessary.
+                       tx := NewTx(data)
+                       b.Transactions = append(b.Transactions, tx)
+               }
+       }
+       return nil
+}
+
+func (b *Block) WriteTo(w io.Writer) (int64, error) {
+       ew := errors.NewWriter(w)
+       b.writeTo(ew, SerBlockFull)
+       return ew.Written(), ew.Err()
+}
+
+// assumes w has sticky errors
+func (b *Block) writeTo(w io.Writer, serflags uint8) {
+       b.BlockHeader.writeTo(w, serflags)
+       if serflags&SerBlockTransactions == SerBlockTransactions {
+               blockchain.WriteVarint31(w, uint64(len(b.Transactions))) // TODO(bobg): check and return error
+               for _, tx := range b.Transactions {
+                       tx.WriteTo(w)
+               }
+       }
+}
diff --git a/protocol/bc/legacy/block_commitment.go b/protocol/bc/legacy/block_commitment.go
new file mode 100644 (file)
index 0000000..846b4bb
--- /dev/null
@@ -0,0 +1,49 @@
+package legacy
+
+import (
+       "io"
+
+       "chain/encoding/blockchain"
+       "chain/protocol/bc"
+)
+
+type BlockCommitment struct {
+       // TransactionsMerkleRoot is the root hash of the Merkle binary hash
+       // tree formed by the hashes of all transactions included in the
+       // block.
+       TransactionsMerkleRoot bc.Hash
+
+       // AssetsMerkleRoot is the root hash of the Merkle Patricia Tree of
+       // the set of unspent outputs with asset version 1 after applying
+       // the block.
+       AssetsMerkleRoot bc.Hash
+
+       // ConsensusProgram is the predicate for validating the next block.
+       ConsensusProgram []byte
+}
+
+func (bc *BlockCommitment) readFrom(r *blockchain.Reader) error {
+       _, err := bc.TransactionsMerkleRoot.ReadFrom(r)
+       if err != nil {
+               return err
+       }
+       _, err = bc.AssetsMerkleRoot.ReadFrom(r)
+       if err != nil {
+               return err
+       }
+       bc.ConsensusProgram, err = blockchain.ReadVarstr31(r)
+       return err
+}
+
+func (bc *BlockCommitment) writeTo(w io.Writer) error {
+       _, err := bc.TransactionsMerkleRoot.WriteTo(w)
+       if err != nil {
+               return err
+       }
+       _, err = bc.AssetsMerkleRoot.WriteTo(w)
+       if err != nil {
+               return err
+       }
+       _, err = blockchain.WriteVarstr31(w, bc.ConsensusProgram)
+       return err
+}
diff --git a/protocol/bc/legacy/block_header.go b/protocol/bc/legacy/block_header.go
new file mode 100644 (file)
index 0000000..54c2c36
--- /dev/null
@@ -0,0 +1,191 @@
+package legacy
+
+import (
+       "bytes"
+       "database/sql/driver"
+       "encoding/hex"
+       "fmt"
+       "io"
+       "time"
+
+       "chain/encoding/blockchain"
+       "chain/encoding/bufpool"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+// BlockHeader describes necessary data of the block.
+type BlockHeader struct {
+       // Version of the block.
+       Version uint64
+
+       // Height of the block in the block chain.
+       // Initial block has height 1.
+       Height uint64
+
+       // Hash of the previous block in the block chain.
+       PreviousBlockHash bc.Hash
+
+       // Time of the block in milliseconds.
+       // Must grow monotonically and can be equal
+       // to the time in the previous block.
+       TimestampMS uint64
+
+       BlockCommitment
+       CommitmentSuffix []byte
+
+       BlockWitness
+       WitnessSuffix []byte
+}
+
+// Time returns the time represented by the Timestamp in bh.
+func (bh *BlockHeader) Time() time.Time {
+       tsNano := bh.TimestampMS * uint64(time.Millisecond)
+       return time.Unix(0, int64(tsNano)).UTC()
+}
+
+func (bh *BlockHeader) Scan(val interface{}) error {
+       driverBuf, ok := val.([]byte)
+       if !ok {
+               return errors.New("Scan must receive a byte slice")
+       }
+       buf := make([]byte, len(driverBuf))
+       copy(buf[:], driverBuf)
+       _, err := bh.readFrom(blockchain.NewReader(buf))
+       return err
+}
+
+func (bh *BlockHeader) Value() (driver.Value, error) {
+       buf := new(bytes.Buffer)
+       _, err := bh.WriteTo(buf)
+       if err != nil {
+               return nil, err
+       }
+       return buf.Bytes(), nil
+}
+
+// Hash returns complete hash of the block header.
+func (bh *BlockHeader) Hash() bc.Hash {
+       h, _ := mapBlockHeader(bh)
+       return h
+}
+
+// MarshalText fulfills the json.Marshaler interface.
+// This guarantees that block headers will get deserialized correctly
+// when being parsed from HTTP requests.
+func (bh *BlockHeader) MarshalText() ([]byte, error) {
+       buf := bufpool.Get()
+       defer bufpool.Put(buf)
+       _, err := bh.WriteTo(buf)
+       if err != nil {
+               return nil, err
+       }
+
+       enc := make([]byte, hex.EncodedLen(buf.Len()))
+       hex.Encode(enc, buf.Bytes())
+       return enc, nil
+}
+
+// UnmarshalText fulfills the encoding.TextUnmarshaler interface.
+func (bh *BlockHeader) UnmarshalText(text []byte) error {
+       decoded := make([]byte, hex.DecodedLen(len(text)))
+       _, err := hex.Decode(decoded, text)
+       if err != nil {
+               return err
+       }
+       _, err = bh.readFrom(blockchain.NewReader(decoded))
+       return err
+}
+
+func (bh *BlockHeader) readFrom(r *blockchain.Reader) (uint8, error) {
+       var serflags [1]byte
+       io.ReadFull(r, serflags[:])
+       switch serflags[0] {
+       case SerBlockSigHash, SerBlockHeader, SerBlockFull:
+       default:
+               return 0, fmt.Errorf("unsupported serialization flags 0x%x", serflags)
+       }
+
+       var err error
+
+       bh.Version, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return 0, err
+       }
+
+       bh.Height, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return 0, err
+       }
+
+       _, err = bh.PreviousBlockHash.ReadFrom(r)
+       if err != nil {
+               return 0, err
+       }
+
+       bh.TimestampMS, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return 0, err
+       }
+
+       bh.CommitmentSuffix, err = blockchain.ReadExtensibleString(r, bh.BlockCommitment.readFrom)
+       if err != nil {
+               return 0, err
+       }
+
+       if serflags[0]&SerBlockWitness == SerBlockWitness {
+               bh.WitnessSuffix, err = blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) (err error) {
+                       bh.Witness, err = blockchain.ReadVarstrList(r)
+                       return err
+               })
+               if err != nil {
+                       return 0, err
+               }
+       }
+
+       return serflags[0], nil
+}
+
+func (bh *BlockHeader) WriteTo(w io.Writer) (int64, error) {
+       ew := errors.NewWriter(w)
+       bh.writeTo(ew, SerBlockHeader)
+       return ew.Written(), ew.Err()
+}
+
+// writeTo writes bh to w.
+func (bh *BlockHeader) writeTo(w io.Writer, serflags uint8) error {
+       w.Write([]byte{serflags})
+
+       var err error
+
+       _, err = blockchain.WriteVarint63(w, bh.Version)
+       if err != nil {
+               return err
+       }
+       _, err = blockchain.WriteVarint63(w, bh.Height)
+       if err != nil {
+               return err
+       }
+       _, err = bh.PreviousBlockHash.WriteTo(w)
+       if err != nil {
+               return err
+       }
+       _, err = blockchain.WriteVarint63(w, bh.TimestampMS)
+       if err != nil {
+               return err
+       }
+
+       _, err = blockchain.WriteExtensibleString(w, bh.CommitmentSuffix, bh.BlockCommitment.writeTo)
+       if err != nil {
+               return err
+       }
+
+       if serflags&SerBlockWitness == SerBlockWitness {
+               _, err = blockchain.WriteExtensibleString(w, bh.WitnessSuffix, bh.BlockWitness.writeTo)
+               if err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
diff --git a/protocol/bc/legacy/block_test.go b/protocol/bc/legacy/block_test.go
new file mode 100644 (file)
index 0000000..b14a409
--- /dev/null
@@ -0,0 +1,171 @@
+package legacy
+
+import (
+       "bytes"
+       "encoding/hex"
+       "encoding/json"
+       "testing"
+       "time"
+
+       "github.com/davecgh/go-spew/spew"
+
+       "chain/protocol/bc"
+       "chain/testutil"
+)
+
+func TestMarshalBlock(t *testing.T) {
+       b := &Block{
+               BlockHeader: BlockHeader{
+                       Version: 1,
+                       Height:  1,
+               },
+
+               Transactions: []*Tx{
+                       NewTx(TxData{
+                               Version: 1,
+                               Outputs: []*TxOutput{
+                                       NewTxOutput(bc.AssetID{}, 1, nil, nil),
+                               },
+                       }),
+               }}
+
+       got, err := json.Marshal(b)
+       if err != nil {
+               t.Errorf("unexpected error %s", err)
+       }
+
+       // Include start and end quote marks because json.Marshal adds them
+       // to the result of Block.MarshalText.
+       wantHex := ("\"03" + // serialization flags
+               "01" + // version
+               "01" + // block height
+               "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
+               "00" + // timestamp
+               "41" + // commitment extensible field length
+               "0000000000000000000000000000000000000000000000000000000000000000" + // tx merkle root
+               "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
+               "00" + // consensus program
+               "01" + // witness extensible string length
+               "00" + // witness number of witness args
+               "01" + // num transactions
+               "07" + // tx 0, serialization flags
+               "01" + // tx 0, tx version
+               "02" + // tx 0, common fields extensible length string
+               "00" + // tx 0, common fields mintime
+               "00" + // tx 0, common fields maxtime
+               "00" + // tx 0, common witness extensible string length
+               "00" + // tx 0, inputs count
+               "01" + // tx 0, outputs count
+               "01" + // tx 0 output 0, asset version
+               "23" + // tx 0, output 0, output commitment length
+               "0000000000000000000000000000000000000000000000000000000000000000" + // tx 0, output 0 commitment, asset id
+               "01" + // tx 0, output 0 commitment, amount
+               "01" + // tx 0, output 0 commitment vm version
+               "00" + // tx 0, output 0 control program
+               "00" + // tx 0, output 0 reference data
+               "00" + // tx 0, output 0 output witness
+               "00\"") // tx 0 reference data
+
+       if !bytes.Equal(got, []byte(wantHex)) {
+               t.Errorf("marshaled block bytes = %s want %s", got, []byte(wantHex))
+       }
+
+       var c Block
+       err = json.Unmarshal(got, &c)
+       if err != nil {
+               t.Errorf("unexpected error %s", err)
+       }
+
+       if !testutil.DeepEqual(*b, c) {
+               t.Errorf("expected marshaled/unmarshaled block to be:\n%sgot:\n%s", spew.Sdump(*b), spew.Sdump(c))
+       }
+
+       got[7] = 'q'
+       err = json.Unmarshal(got, &c)
+       if err == nil {
+               t.Error("unmarshaled corrupted JSON ok, wanted error")
+       }
+}
+
+func TestEmptyBlock(t *testing.T) {
+       block := Block{
+               BlockHeader: BlockHeader{
+                       Version: 1,
+                       Height:  1,
+               },
+       }
+
+       got := serialize(t, &block)
+       wantHex := ("03" + // serialization flags
+               "01" + // version
+               "01" + // block height
+               "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
+               "00" + // timestamp
+               "41" + // commitment extensible field length
+               "0000000000000000000000000000000000000000000000000000000000000000" + // transactions merkle root
+               "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
+               "00" + // consensus program
+               "01" + // witness extensible string length
+               "00" + // witness number of witness args
+               "00") // num transactions
+       want, _ := hex.DecodeString(wantHex)
+       if !bytes.Equal(got, want) {
+               t.Errorf("empty block bytes = %x want %x", got, want)
+       }
+
+       got = serialize(t, &block.BlockHeader)
+       wantHex = ("01" + // serialization flags
+               "01" + // version
+               "01" + // block height
+               "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
+               "00" + // timestamp
+               "41" + // commitment extensible field length
+               "0000000000000000000000000000000000000000000000000000000000000000" + // transactions merkle root
+               "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
+               "00" + // consensus program
+               "01" + // witness extensible string length
+               "00") // witness number of witness args
+       want, _ = hex.DecodeString(wantHex)
+       if !bytes.Equal(got, want) {
+               t.Errorf("empty block header bytes = %x want %x", got, want)
+       }
+
+       wantHash := mustDecodeHash("6a73cbca99e33c8403d589664623c74df34dd6d7328ab6e7f27dd3e60d959850")
+       if h := block.Hash(); h != wantHash {
+               t.Errorf("got block hash %x, want %x", h.Bytes(), wantHash.Bytes())
+       }
+
+       wTime := time.Unix(0, 0).UTC()
+       if got := block.Time(); got != wTime {
+               t.Errorf("empty block time = %v want %v", got, wTime)
+       }
+}
+
+func TestSmallBlock(t *testing.T) {
+       block := Block{
+               BlockHeader: BlockHeader{
+                       Version: 1,
+                       Height:  1,
+               },
+               Transactions: []*Tx{NewTx(TxData{Version: CurrentTransactionVersion})},
+       }
+
+       got := serialize(t, &block)
+       wantHex := ("03" + // serialization flags
+               "01" + // version
+               "01" + // block height
+               "0000000000000000000000000000000000000000000000000000000000000000" + // prev block hash
+               "00" + // timestamp
+               "41" + // commitment extensible field length
+               "0000000000000000000000000000000000000000000000000000000000000000" + // transactions merkle root
+               "0000000000000000000000000000000000000000000000000000000000000000" + // assets merkle root
+               "00" + // consensus program
+               "01" + // witness extensible string length
+               "00" + // witness num witness args
+               "01" + // num transactions
+               "070102000000000000") // transaction
+       want, _ := hex.DecodeString(wantHex)
+       if !bytes.Equal(got, want) {
+               t.Errorf("small block bytes = %x want %x", got, want)
+       }
+}
diff --git a/protocol/bc/legacy/block_witness.go b/protocol/bc/legacy/block_witness.go
new file mode 100644 (file)
index 0000000..5ee6d30
--- /dev/null
@@ -0,0 +1,18 @@
+package legacy
+
+import (
+       "io"
+
+       "chain/encoding/blockchain"
+)
+
+// BlockWitness holds the witness data for a block.
+type BlockWitness struct {
+	// Witness is a vector of arguments to the previous block's
+	// ConsensusProgram for validating this block.
+	Witness [][]byte
+}
+
+// writeTo serializes the witness argument list as a varstr list.
+func (bw *BlockWitness) writeTo(w io.Writer) error {
+	_, err := blockchain.WriteVarstrList(w, bw.Witness)
+	return err
+}
diff --git a/protocol/bc/legacy/fuzz_test.go b/protocol/bc/legacy/fuzz_test.go
new file mode 100644 (file)
index 0000000..4a05668
--- /dev/null
@@ -0,0 +1,28 @@
+package legacy
+
+import "testing"
+
+// TestFuzzUnknownAssetVersion checks that a transaction containing
+// fields from an unknown asset version survives a
+// deserialize/serialize/deserialize round trip with its ID intact.
+func TestFuzzUnknownAssetVersion(t *testing.T) {
+	const rawTx = `07010700f785c1f1b72b0001f1b72b0001012b00089def834ab929327f3f479177e2d8c293f2f7fc4f251db8547896c0eeafb984261a73767178584c246400b50150935a092ffad7ec9fbac4f4486db6c3b8cd5b9f51cf697248584dde286a722000012b766baa20627e83fdad13dd98436fa7cbdd1412d50ef65528edb7e2ed8f2675b2a0b209235151ad696c00c0030040b984261ad6e71876ec4c2464012b766baa209d44ee5b6ebf6c408772ead7713f1a66b9de7655ff452513487be1fb10de7d985151ad696c00c02a7b2274657374223a225175657279546573742e7465737442616c616e636551756572792e74657374227d`
+
+	var want Tx
+	err := want.UnmarshalText([]byte(rawTx))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	b, err := want.MarshalText()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Make sure serializing and deserializing gives the same tx
+	var got Tx
+	err = got.UnmarshalText(b)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got.ID.String() != want.ID.String() {
+		t.Errorf("tx id changed to %s", got.ID.String())
+	}
+}
diff --git a/protocol/bc/legacy/issuance.go b/protocol/bc/legacy/issuance.go
new file mode 100644 (file)
index 0000000..6a75573
--- /dev/null
@@ -0,0 +1,61 @@
+package legacy
+
+import (
+       "chain/crypto/sha3pool"
+       "chain/protocol/bc"
+)
+
+// IssuanceInput satisfies the TypedInput interface and represents
+// the issuance of new units of an asset.
+type IssuanceInput struct {
+	// Commitment
+	Nonce  []byte
+	Amount uint64
+	// Note: as long as we require serflags=0x7, we don't need to
+	// explicitly store the asset ID here even though it's technically
+	// part of the input commitment. We can compute it instead from
+	// values in the witness (which, with serflags other than 0x7,
+	// might not be present).
+
+	// Witness
+	IssuanceWitness
+}
+
+// IsIssuance reports that this input is an issuance.
+func (ii *IssuanceInput) IsIssuance() bool { return true }
+
+// AssetID recomputes the asset ID from the witness fields (issuance
+// program, initial block hash, VM version, and asset definition hash).
+func (ii *IssuanceInput) AssetID() bc.AssetID {
+	defhash := ii.AssetDefinitionHash()
+	return bc.ComputeAssetID(ii.IssuanceProgram, &ii.InitialBlock, ii.VMVersion, &defhash)
+}
+
+// AssetDefinitionHash returns the SHA3-256 hash of the raw asset
+// definition, using a pooled hasher to avoid per-call allocation.
+func (ii *IssuanceInput) AssetDefinitionHash() (defhash bc.Hash) {
+	sha := sha3pool.Get256()
+	defer sha3pool.Put256(sha)
+	sha.Write(ii.AssetDefinition)
+	defhash.ReadFrom(sha)
+	return defhash
+}
+
+// NewIssuanceInput constructs a TxInput wrapping an IssuanceInput.
+// Asset version and VM version are both fixed at 1.
+func NewIssuanceInput(
+	nonce []byte,
+	amount uint64,
+	referenceData []byte,
+	initialBlock bc.Hash,
+	issuanceProgram []byte,
+	arguments [][]byte,
+	assetDefinition []byte,
+) *TxInput {
+	return &TxInput{
+		AssetVersion:  1,
+		ReferenceData: referenceData,
+		TypedInput: &IssuanceInput{
+			Nonce:  nonce,
+			Amount: amount,
+			IssuanceWitness: IssuanceWitness{
+				InitialBlock:    initialBlock,
+				AssetDefinition: assetDefinition,
+				VMVersion:       1,
+				IssuanceProgram: issuanceProgram,
+				Arguments:       arguments,
+			},
+		},
+	}
+}
diff --git a/protocol/bc/legacy/issuance_witness.go b/protocol/bc/legacy/issuance_witness.go
new file mode 100644 (file)
index 0000000..a876e30
--- /dev/null
@@ -0,0 +1,11 @@
+package legacy
+
+import "chain/protocol/bc"
+
+// IssuanceWitness holds the witness data for an issuance input:
+// everything needed to recompute the asset ID and to satisfy the
+// issuance program.
+type IssuanceWitness struct {
+	InitialBlock    bc.Hash  // hash of the chain's initial block
+	AssetDefinition []byte   // raw asset definition (hashed into the asset ID)
+	VMVersion       uint64   // VM version for the issuance program
+	IssuanceProgram []byte
+	Arguments       [][]byte // arguments satisfying the issuance program
+}
diff --git a/protocol/bc/legacy/map.go b/protocol/bc/legacy/map.go
new file mode 100644 (file)
index 0000000..c20e80a
--- /dev/null
@@ -0,0 +1,242 @@
+package legacy
+
+import (
+       "chain/crypto/sha3pool"
+       "chain/protocol/bc"
+       "chain/protocol/vm"
+       "chain/protocol/vm/vmutil"
+)
+
+// MapTx converts a legacy TxData object into its entries-based
+// representation. The resulting bc.Tx carries the full entry map
+// plus derived indexes: InputIDs (entry IDs ordered by input
+// ordinal), NonceIDs, and SpentOutputIDs.
+func MapTx(oldTx *TxData) *bc.Tx {
+	txid, header, entries := mapTx(oldTx)
+
+	tx := &bc.Tx{
+		TxHeader: header,
+		ID:       txid,
+		Entries:  entries,
+		InputIDs: make([]bc.Hash, len(oldTx.Inputs)),
+	}
+
+	var (
+		nonceIDs       = make(map[bc.Hash]bool)
+		spentOutputIDs = make(map[bc.Hash]bool)
+	)
+	// Walk all entries, recording the entry ID of each issuance/spend
+	// at its input ordinal and collecting nonce and spent-output IDs.
+	for id, e := range entries {
+		var ord uint64
+		switch e := e.(type) {
+		case *bc.Issuance:
+			anchor, ok := entries[*e.AnchorId]
+			if !ok {
+				// this tx will be invalid because this issuance is
+				// missing an anchor
+				continue
+			}
+			if _, ok := anchor.(*bc.Nonce); ok {
+				nonceIDs[*e.AnchorId] = true
+			}
+			ord = e.Ordinal
+			// resume below after the switch
+
+		case *bc.Spend:
+			spentOutputIDs[*e.SpentOutputId] = true
+			ord = e.Ordinal
+			// resume below after the switch
+
+		default:
+			continue
+		}
+		if ord >= uint64(len(oldTx.Inputs)) {
+			continue // poorly-formed transaction
+		}
+		tx.InputIDs[ord] = id
+	}
+
+	// NOTE(review): map iteration order is random, so NonceIDs and
+	// SpentOutputIDs come out in nondeterministic order — confirm no
+	// caller depends on their ordering.
+	for id := range nonceIDs {
+		tx.NonceIDs = append(tx.NonceIDs, id)
+	}
+	for id := range spentOutputIDs {
+		tx.SpentOutputIDs = append(tx.SpentOutputIDs, id)
+	}
+	return tx
+}
+
+// mapTx builds the entries-based representation of tx: each input,
+// output, nonce, time range, mux, and the header become bc.Entry
+// values keyed by their entry IDs. It returns the header's entry ID,
+// the header itself, and the full entry map.
+func mapTx(tx *TxData) (headerID bc.Hash, hdr *bc.TxHeader, entryMap map[bc.Hash]bc.Entry) {
+	entryMap = make(map[bc.Hash]bc.Entry)
+
+	// addEntry registers e in entryMap under its computed ID.
+	addEntry := func(e bc.Entry) bc.Hash {
+		id := bc.EntryID(e)
+		entryMap[id] = e
+		return id
+	}
+
+	// Loop twice over tx.Inputs, once for spends and once for
+	// issuances.  Do spends first so the entry ID of the first spend is
+	// available in case an issuance needs it for its anchor.
+
+	var (
+		firstSpend   *bc.Spend
+		firstSpendID bc.Hash
+		spends       []*bc.Spend
+		issuances    []*bc.Issuance
+		muxSources   = make([]*bc.ValueSource, len(tx.Inputs))
+	)
+
+	for i, inp := range tx.Inputs {
+		if oldSp, ok := inp.TypedInput.(*SpendInput); ok {
+			prog := &bc.Program{VmVersion: oldSp.VMVersion, Code: oldSp.ControlProgram}
+			src := &bc.ValueSource{
+				Ref:      &oldSp.SourceID,
+				Value:    &oldSp.AssetAmount,
+				Position: oldSp.SourcePosition,
+			}
+			out := bc.NewOutput(src, prog, &oldSp.RefDataHash, 0) // ordinal doesn't matter for prevouts, only for result outputs
+			prevoutID := addEntry(out)
+			refdatahash := hashData(inp.ReferenceData)
+			sp := bc.NewSpend(&prevoutID, &refdatahash, uint64(i))
+			sp.WitnessArguments = oldSp.Arguments
+			id := addEntry(sp)
+			muxSources[i] = &bc.ValueSource{
+				Ref:   &id,
+				Value: &oldSp.AssetAmount,
+			}
+			if firstSpend == nil {
+				firstSpend = sp
+				firstSpendID = id
+			}
+			spends = append(spends, sp)
+		}
+	}
+
+	for i, inp := range tx.Inputs {
+		if oldIss, ok := inp.TypedInput.(*IssuanceInput); ok {
+			// Note: asset definitions, initial block ids, and issuance
+			// programs are omitted here because they do not contribute to
+			// the body hash of an issuance.
+
+			var (
+				anchorID    bc.Hash
+				setAnchored = func(*bc.Hash) {}
+			)
+
+			if len(oldIss.Nonce) > 0 {
+				// An explicit nonce anchors the issuance via a Nonce
+				// entry whose program checks the nonce and asset ID.
+				tr := bc.NewTimeRange(tx.MinTime, tx.MaxTime)
+				trID := addEntry(tr)
+				assetID := oldIss.AssetID()
+
+				builder := vmutil.NewBuilder()
+				builder.AddData(oldIss.Nonce).AddOp(vm.OP_DROP)
+				builder.AddOp(vm.OP_ASSET).AddData(assetID.Bytes()).AddOp(vm.OP_EQUAL)
+				prog, _ := builder.Build() // error is impossible
+
+				nonce := bc.NewNonce(&bc.Program{VmVersion: 1, Code: prog}, &trID)
+				anchorID = addEntry(nonce)
+				setAnchored = nonce.SetAnchored
+			} else if firstSpend != nil {
+				// Without a nonce, the issuance anchors to the first spend.
+				anchorID = firstSpendID
+				setAnchored = firstSpend.SetAnchored
+			}
+
+			val := inp.AssetAmount()
+
+			refdatahash := hashData(inp.ReferenceData)
+			assetdefhash := hashData(oldIss.AssetDefinition)
+			iss := bc.NewIssuance(&anchorID, &val, &refdatahash, uint64(i))
+			iss.WitnessAssetDefinition = &bc.AssetDefinition{
+				InitialBlockId: &oldIss.InitialBlock,
+				Data:           &assetdefhash,
+				IssuanceProgram: &bc.Program{
+					VmVersion: oldIss.VMVersion,
+					Code:      oldIss.IssuanceProgram,
+				},
+			}
+			iss.WitnessArguments = oldIss.Arguments
+			issID := addEntry(iss)
+			setAnchored(&issID)
+
+			muxSources[i] = &bc.ValueSource{
+				Ref:   &issID,
+				Value: &val,
+			}
+			issuances = append(issuances, iss)
+		}
+	}
+
+	// A single mux gathers value from all inputs and feeds all outputs.
+	mux := bc.NewMux(muxSources, &bc.Program{VmVersion: 1, Code: []byte{byte(vm.OP_TRUE)}})
+	muxID := addEntry(mux)
+
+	for _, sp := range spends {
+		spentOutput := entryMap[*sp.SpentOutputId].(*bc.Output)
+		sp.SetDestination(&muxID, spentOutput.Source.Value, sp.Ordinal)
+	}
+	for _, iss := range issuances {
+		iss.SetDestination(&muxID, iss.Value, iss.Ordinal)
+	}
+
+	var resultIDs []*bc.Hash
+
+	for i, out := range tx.Outputs {
+		src := &bc.ValueSource{
+			Ref:      &muxID,
+			Value:    &out.AssetAmount,
+			Position: uint64(i),
+		}
+		var dest *bc.ValueDestination
+		if vmutil.IsUnspendable(out.ControlProgram) {
+			// retirement
+			refdatahash := hashData(out.ReferenceData)
+			r := bc.NewRetirement(src, &refdatahash, uint64(i))
+			rID := addEntry(r)
+			resultIDs = append(resultIDs, &rID)
+			dest = &bc.ValueDestination{
+				Ref:      &rID,
+				Position: 0,
+			}
+		} else {
+			// non-retirement
+			// Use keyed fields, consistent with the other bc.Program
+			// literals above (go vet flags unkeyed composite literals).
+			prog := &bc.Program{VmVersion: out.VMVersion, Code: out.ControlProgram}
+			refdatahash := hashData(out.ReferenceData)
+			o := bc.NewOutput(src, prog, &refdatahash, uint64(i))
+			oID := addEntry(o)
+			resultIDs = append(resultIDs, &oID)
+			dest = &bc.ValueDestination{
+				Ref:      &oID,
+				Position: 0,
+			}
+		}
+		dest.Value = src.Value
+		mux.WitnessDestinations = append(mux.WitnessDestinations, dest)
+	}
+
+	refdatahash := hashData(tx.ReferenceData)
+	h := bc.NewTxHeader(tx.Version, resultIDs, &refdatahash, tx.MinTime, tx.MaxTime)
+	headerID = addEntry(h)
+
+	return headerID, h, entryMap
+}
+
+// mapBlockHeader converts a legacy BlockHeader into its entries-based
+// form and returns the new header together with its entry ID.
+func mapBlockHeader(old *BlockHeader) (bhID bc.Hash, bh *bc.BlockHeader) {
+	bh = bc.NewBlockHeader(old.Version, old.Height, &old.PreviousBlockHash, old.TimestampMS, &old.TransactionsMerkleRoot, &old.AssetsMerkleRoot, old.ConsensusProgram)
+	bh.WitnessArguments = old.Witness
+	bhID = bc.EntryID(bh)
+	return
+}
+
+// MapBlock converts a legacy Block into its entries-based form,
+// reusing each transaction's already-mapped bc.Tx.
+func MapBlock(old *Block) *bc.Block {
+	if old == nil {
+		return nil // if old is nil, so should new be
+	}
+	b := new(bc.Block)
+	b.ID, b.BlockHeader = mapBlockHeader(&old.BlockHeader)
+	for _, oldTx := range old.Transactions {
+		b.Transactions = append(b.Transactions, oldTx.Tx)
+	}
+	return b
+}
+
+// hashData returns the SHA3-256 hash of data as a bc.Hash.
+func hashData(data []byte) bc.Hash {
+	var b32 [32]byte
+	sha3pool.Sum256(b32[:], data)
+	return bc.NewHash(b32)
+}
diff --git a/protocol/bc/legacy/map_test.go b/protocol/bc/legacy/map_test.go
new file mode 100644 (file)
index 0000000..895ed31
--- /dev/null
@@ -0,0 +1,61 @@
+package legacy
+
+import (
+       "bytes"
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+
+       "chain/protocol/bc"
+)
+
+func TestMapTx(t *testing.T) {
+       // sample data copied from transaction_test.go
+       // TODO(bobg): factor out into reusable test utility
+
+       oldTx := sampleTx()
+       oldOuts := oldTx.Outputs
+
+       _, header, entryMap := mapTx(oldTx)
+       t.Log(spew.Sdump(entryMap))
+
+       if header.Version != 1 {
+               t.Errorf("header.Version is %d, expected 1", header.Version)
+       }
+       if header.MinTimeMs != oldTx.MinTime {
+               t.Errorf("header.MinTimeMs is %d, expected %d", header.MinTimeMs, oldTx.MinTime)
+       }
+       if header.MaxTimeMs != oldTx.MaxTime {
+               t.Errorf("header.MaxTimeMs is %d, expected %d", header.MaxTimeMs, oldTx.MaxTime)
+       }
+       if len(header.ResultIds) != len(oldOuts) {
+               t.Errorf("header.ResultIds contains %d item(s), expected %d", len(header.ResultIds), len(oldOuts))
+       }
+
+       for i, oldOut := range oldOuts {
+               if resultEntry, ok := entryMap[*header.ResultIds[i]]; ok {
+                       if newOut, ok := resultEntry.(*bc.Output); ok {
+                               if *newOut.Source.Value != oldOut.AssetAmount {
+                                       t.Errorf("header.ResultIds[%d].(*output).Source is %v, expected %v", i, newOut.Source.Value, oldOut.AssetAmount)
+                               }
+                               if newOut.ControlProgram.VmVersion != 1 {
+                                       t.Errorf("header.ResultIds[%d].(*output).ControlProgram.VMVersion is %d, expected 1", i, newOut.ControlProgram.VmVersion)
+                               }
+                               if !bytes.Equal(newOut.ControlProgram.Code, oldOut.ControlProgram) {
+                                       t.Errorf("header.ResultIds[%d].(*output).ControlProgram.Code is %x, expected %x", i, newOut.ControlProgram.Code, oldOut.ControlProgram)
+                               }
+                               if *newOut.Data != hashData(oldOut.ReferenceData) {
+                                       want := hashData(oldOut.ReferenceData)
+                                       t.Errorf("header.ResultIds[%d].(*output).Data is %x, expected %x", i, newOut.Data.Bytes(), want.Bytes())
+                               }
+                               if !newOut.ExtHash.IsZero() {
+                                       t.Errorf("header.ResultIds[%d].(*output).ExtHash is %x, expected zero", i, newOut.ExtHash.Bytes())
+                               }
+                       } else {
+                               t.Errorf("header.ResultIds[%d] has type %T, expected *Output", i, resultEntry)
+                       }
+               } else {
+                       t.Errorf("entryMap contains nothing for header.ResultIds[%d] (%x)", i, header.ResultIds[i].Bytes())
+               }
+       }
+}
diff --git a/protocol/bc/legacy/output_commitment.go b/protocol/bc/legacy/output_commitment.go
new file mode 100644 (file)
index 0000000..8efe6e0
--- /dev/null
@@ -0,0 +1,79 @@
+package legacy
+
+import (
+       "fmt"
+       "io"
+
+       "chain/crypto/sha3pool"
+       "chain/encoding/blockchain"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+// OutputCommitment contains the commitment data for a transaction
+// output (which also appears in the spend input of that output).
+type OutputCommitment struct {
+	bc.AssetAmount
+	VMVersion      uint64
+	ControlProgram []byte
+}
+
+// writeExtensibleString writes the commitment wrapped in an
+// extensible string, appending the unconsumed suffix so unknown
+// future fields round-trip unchanged.
+func (oc *OutputCommitment) writeExtensibleString(w io.Writer, suffix []byte, assetVersion uint64) error {
+	_, err := blockchain.WriteExtensibleString(w, suffix, func(w io.Writer) error {
+		return oc.writeContents(w, suffix, assetVersion)
+	})
+	return err
+}
+
+// writeContents serializes the commitment fields (asset amount, VM
+// version, control program) for asset version 1, then the raw suffix.
+// For other asset versions only the suffix is written.
+func (oc *OutputCommitment) writeContents(w io.Writer, suffix []byte, assetVersion uint64) (err error) {
+	if assetVersion == 1 {
+		_, err = oc.AssetAmount.WriteTo(w)
+		if err != nil {
+			return errors.Wrap(err, "writing asset amount")
+		}
+		_, err = blockchain.WriteVarint63(w, oc.VMVersion)
+		if err != nil {
+			return errors.Wrap(err, "writing vm version")
+		}
+		_, err = blockchain.WriteVarstr31(w, oc.ControlProgram)
+		if err != nil {
+			return errors.Wrap(err, "writing control program")
+		}
+	}
+	if len(suffix) > 0 {
+		_, err = w.Write(suffix)
+		if err != nil {
+			return errors.Wrap(err, "writing suffix")
+		}
+	}
+	return nil
+}
+
+// readFrom deserializes the commitment for the given asset version,
+// returning any unconsumed extensible-string suffix. Only VM version 1
+// is accepted for asset version 1.
+func (oc *OutputCommitment) readFrom(r *blockchain.Reader, assetVersion uint64) (suffix []byte, err error) {
+	return blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) error {
+		if assetVersion == 1 {
+			err := oc.AssetAmount.ReadFrom(r)
+			if err != nil {
+				return errors.Wrap(err, "reading asset+amount")
+			}
+			oc.VMVersion, err = blockchain.ReadVarint63(r)
+			if err != nil {
+				return errors.Wrap(err, "reading VM version")
+			}
+			if oc.VMVersion != 1 {
+				return fmt.Errorf("unrecognized VM version %d for asset version 1", oc.VMVersion)
+			}
+			oc.ControlProgram, err = blockchain.ReadVarstr31(r)
+			return errors.Wrap(err, "reading control program")
+		}
+		return nil
+	})
+}
+
+// Hash returns the SHA3-256 hash of the serialized commitment.
+// NOTE(review): the serialization error is ignored here; a pooled
+// hasher's Write cannot fail, but writeExtensibleString can in
+// principle return an encoding error — confirm this is safe.
+func (oc *OutputCommitment) Hash(suffix []byte, assetVersion uint64) (outputhash bc.Hash) {
+	h := sha3pool.Get256()
+	defer sha3pool.Put256(h)
+	oc.writeExtensibleString(h, suffix, assetVersion) // TODO(oleg): get rid of this assetVersion parameter to actually write all the bytes
+	outputhash.ReadFrom(h)
+	return outputhash
+}
diff --git a/protocol/bc/legacy/spend.go b/protocol/bc/legacy/spend.go
new file mode 100644 (file)
index 0000000..00f3ec6
--- /dev/null
@@ -0,0 +1,149 @@
+package legacy
+
+import (
+       "fmt"
+       "io"
+
+       "chain/crypto/sha3pool"
+       "chain/encoding/blockchain"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+// SpendInput satisfies the TypedInput interface and represents a spend transaction.
+type SpendInput struct {
+	// Commitment
+	SpendCommitment
+
+	// The unconsumed suffix of the output commitment
+	SpendCommitmentSuffix []byte
+
+	// Witness
+	Arguments [][]byte
+}
+
+// IsIssuance reports that this input is not an issuance.
+func (si *SpendInput) IsIssuance() bool { return false }
+
+// NewSpendInput constructs a TxInput wrapping a SpendInput that
+// spends the given previous output. Asset version and VM version
+// are both fixed at 1.
+func NewSpendInput(arguments [][]byte, sourceID bc.Hash, assetID bc.AssetID, amount uint64, sourcePos uint64, controlProgram []byte, outRefDataHash bc.Hash, referenceData []byte) *TxInput {
+	const (
+		vmver    = 1
+		assetver = 1
+	)
+	sc := SpendCommitment{
+		AssetAmount: bc.AssetAmount{
+			AssetId: &assetID,
+			Amount:  amount,
+		},
+		SourceID:       sourceID,
+		SourcePosition: sourcePos,
+		VMVersion:      vmver,
+		ControlProgram: controlProgram,
+		RefDataHash:    outRefDataHash,
+	}
+	return &TxInput{
+		AssetVersion:  assetver,
+		ReferenceData: referenceData,
+		TypedInput: &SpendInput{
+			SpendCommitment: sc,
+			Arguments:       arguments,
+		},
+	}
+}
+
+// SpendCommitment contains the commitment data for a transaction
+// output (which also appears in the spend input of that output).
+type SpendCommitment struct {
+	bc.AssetAmount
+	SourceID       bc.Hash
+	SourcePosition uint64
+	VMVersion      uint64
+	ControlProgram []byte
+	RefDataHash    bc.Hash
+}
+
+// writeExtensibleString writes the commitment wrapped in an
+// extensible string, appending the unconsumed suffix so unknown
+// future fields round-trip unchanged.
+func (sc *SpendCommitment) writeExtensibleString(w io.Writer, suffix []byte, assetVersion uint64) error {
+	_, err := blockchain.WriteExtensibleString(w, suffix, func(w io.Writer) error {
+		return sc.writeContents(w, suffix, assetVersion)
+	})
+	return err
+}
+
+// writeContents serializes the commitment fields for asset version 1
+// in wire order (source ID, asset amount, source position, VM
+// version, control program, refdata hash), then the raw suffix.
+// For other asset versions only the suffix is written.
+func (sc *SpendCommitment) writeContents(w io.Writer, suffix []byte, assetVersion uint64) (err error) {
+	if assetVersion == 1 {
+		_, err = sc.SourceID.WriteTo(w)
+		if err != nil {
+			return errors.Wrap(err, "writing source id")
+		}
+		_, err = sc.AssetAmount.WriteTo(w)
+		if err != nil {
+			return errors.Wrap(err, "writing asset amount")
+		}
+		_, err = blockchain.WriteVarint63(w, sc.SourcePosition)
+		if err != nil {
+			return errors.Wrap(err, "writing source position")
+		}
+		_, err = blockchain.WriteVarint63(w, sc.VMVersion)
+		if err != nil {
+			return errors.Wrap(err, "writing vm version")
+		}
+		_, err = blockchain.WriteVarstr31(w, sc.ControlProgram)
+		if err != nil {
+			return errors.Wrap(err, "writing control program")
+		}
+		_, err = sc.RefDataHash.WriteTo(w)
+		if err != nil {
+			return errors.Wrap(err, "writing reference data hash")
+		}
+	}
+	if len(suffix) > 0 {
+		_, err = w.Write(suffix)
+		if err != nil {
+			return errors.Wrap(err, "writing suffix")
+		}
+	}
+	return nil
+}
+
+// readFrom deserializes the commitment for the given asset version,
+// returning any unconsumed extensible-string suffix. Field order
+// mirrors writeContents; only VM version 1 is accepted for asset
+// version 1.
+func (sc *SpendCommitment) readFrom(r *blockchain.Reader, assetVersion uint64) (suffix []byte, err error) {
+	return blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) error {
+		if assetVersion == 1 {
+			_, err := sc.SourceID.ReadFrom(r)
+			if err != nil {
+				return errors.Wrap(err, "reading source id")
+			}
+			err = sc.AssetAmount.ReadFrom(r)
+			if err != nil {
+				return errors.Wrap(err, "reading asset+amount")
+			}
+			sc.SourcePosition, err = blockchain.ReadVarint63(r)
+			if err != nil {
+				return errors.Wrap(err, "reading source position")
+			}
+			sc.VMVersion, err = blockchain.ReadVarint63(r)
+			if err != nil {
+				return errors.Wrap(err, "reading VM version")
+			}
+			if sc.VMVersion != 1 {
+				return fmt.Errorf("unrecognized VM version %d for asset version 1", sc.VMVersion)
+			}
+			sc.ControlProgram, err = blockchain.ReadVarstr31(r)
+			if err != nil {
+				return errors.Wrap(err, "reading control program")
+			}
+			_, err = sc.RefDataHash.ReadFrom(r)
+			if err != nil {
+				return errors.Wrap(err, "reading reference data hash")
+			}
+			return nil
+		}
+		return nil
+	})
+}
+
+// Hash returns the SHA3-256 hash of the serialized commitment.
+// NOTE(review): the serialization error is ignored here, matching
+// OutputCommitment.Hash — confirm encoding errors are impossible.
+func (sc *SpendCommitment) Hash(suffix []byte, assetVersion uint64) (spendhash bc.Hash) {
+	h := sha3pool.Get256()
+	defer sha3pool.Put256(h)
+	sc.writeExtensibleString(h, suffix, assetVersion) // TODO(oleg): get rid of this assetVersion parameter to actually write all the bytes
+	spendhash.ReadFrom(h)
+	return spendhash
+}
diff --git a/protocol/bc/legacy/transaction.go b/protocol/bc/legacy/transaction.go
new file mode 100644 (file)
index 0000000..843ac49
--- /dev/null
@@ -0,0 +1,293 @@
+package legacy
+
+import (
+       "bytes"
+       "encoding/hex"
+       "fmt"
+       "io"
+
+       "chain/crypto/sha3pool"
+       "chain/encoding/blockchain"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+// CurrentTransactionVersion is the current latest
+// supported transaction version.
+const CurrentTransactionVersion = 1
+
+// Tx holds a transaction along with its hash.
+type Tx struct {
+       TxData
+       *bc.Tx `json:"-"`
+}
+
+func (tx *Tx) UnmarshalText(p []byte) error {
+       if err := tx.TxData.UnmarshalText(p); err != nil {
+               return err
+       }
+
+       tx.Tx = MapTx(&tx.TxData)
+       return nil
+}
+
+// SetInputArguments sets the Arguments field in input n.
+// It updates both the legacy input and the corresponding mapped
+// entry so the two representations stay in sync.
+// Panics if n is out of range of tx.Inputs.
+func (tx *Tx) SetInputArguments(n uint32, args [][]byte) {
+       tx.Inputs[n].SetArguments(args)
+       id := tx.Tx.InputIDs[n]
+       e := tx.Entries[id]
+       switch e := e.(type) {
+       case *bc.Issuance:
+               e.WitnessArguments = args
+       case *bc.Spend:
+               e.WitnessArguments = args
+       }
+}
+
+// IssuanceHash returns the mapped entry ID of input n.
+func (tx *Tx) IssuanceHash(n int) bc.Hash {
+       return tx.Tx.InputIDs[n]
+}
+
+// OutputID returns the mapped result ID of the output at outputIndex.
+func (tx *Tx) OutputID(outputIndex int) *bc.Hash {
+       return tx.ResultIds[outputIndex]
+}
+
+// NewTx returns a new Tx containing data and its hash.
+// If you have already computed the hash, use struct literal
+// notation to make a Tx object directly.
+func NewTx(data TxData) *Tx {
+       return &Tx{
+               TxData: data,
+               Tx:     MapTx(&data),
+       }
+}
+
+// These flags are part of the wire protocol;
+// they must not change.
+const (
+       SerWitness uint8 = 1 << iota
+       SerPrevout
+       SerMetadata // when set, reference data is serialized in full (see writeRefData)
+
+       // Bit mask for accepted serialization flags.
+       // All other flag bits must be 0.
+       SerTxHash   = 0x0 // this is used only for computing transaction hash - prevout and refdata are replaced with their hashes
+       SerValid    = 0x7
+       serRequired = 0x7 // we support only this combination of flags
+)
+
+// TxData encodes a transaction in the blockchain.
+// Most users will want to use Tx instead;
+// it includes the hash.
+type TxData struct {
+       Version uint64
+       Inputs  []*TxInput
+       Outputs []*TxOutput
+
+       // Common fields
+       MinTime uint64
+       MaxTime uint64
+
+       // The unconsumed suffix of the common fields extensible string
+       CommonFieldsSuffix []byte
+
+       // The unconsumed suffix of the common witness extensible string
+       CommonWitnessSuffix []byte
+
+       // ReferenceData is written in full only when SerMetadata is set;
+       // otherwise its hash is serialized (see writeRefData).
+       ReferenceData []byte
+}
+
+// HasIssuance returns true if this transaction has an issuance input.
+func (tx *TxData) HasIssuance() bool {
+       for _, in := range tx.Inputs {
+               if in.IsIssuance() {
+                       return true
+               }
+       }
+       return false
+}
+
+func (tx *TxData) UnmarshalText(p []byte) error {
+       b := make([]byte, hex.DecodedLen(len(p)))
+       _, err := hex.Decode(b, p)
+       if err != nil {
+               return err
+       }
+
+       r := blockchain.NewReader(b)
+       err = tx.readFrom(r)
+       if err != nil {
+               return err
+       }
+       if trailing := r.Len(); trailing > 0 {
+               return fmt.Errorf("trailing garbage (%d bytes)", trailing)
+       }
+       return nil
+}
+
+// readFrom deserializes a transaction from r in wire order:
+// serialization flags, version, common fields, common witness,
+// inputs, outputs, reference data. Only serRequired flags are
+// accepted.
+func (tx *TxData) readFrom(r *blockchain.Reader) error {
+       var serflags [1]byte
+       _, err := io.ReadFull(r, serflags[:])
+       if err != nil {
+               return errors.Wrap(err, "reading serialization flags")
+       }
+       // err is necessarily nil here (handled above), so check the
+       // flag byte directly.
+       if serflags[0] != serRequired {
+               return fmt.Errorf("unsupported serflags %#x", serflags[0])
+       }
+
+       tx.Version, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return errors.Wrap(err, "reading transaction version")
+       }
+
+       // Common fields
+       tx.CommonFieldsSuffix, err = blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) error {
+               tx.MinTime, err = blockchain.ReadVarint63(r)
+               if err != nil {
+                       return errors.Wrap(err, "reading transaction mintime")
+               }
+               tx.MaxTime, err = blockchain.ReadVarint63(r)
+               return errors.Wrap(err, "reading transaction maxtime")
+       })
+       if err != nil {
+               return errors.Wrap(err, "reading transaction common fields")
+       }
+
+       // Common witness
+       tx.CommonWitnessSuffix, err = blockchain.ReadExtensibleString(r, tx.readCommonWitness)
+       if err != nil {
+               return errors.Wrap(err, "reading transaction common witness")
+       }
+
+       n, err := blockchain.ReadVarint31(r)
+       if err != nil {
+               return errors.Wrap(err, "reading number of transaction inputs")
+       }
+       for ; n > 0; n-- {
+               ti := new(TxInput)
+               err = ti.readFrom(r)
+               if err != nil {
+                       return errors.Wrapf(err, "reading input %d", len(tx.Inputs))
+               }
+               tx.Inputs = append(tx.Inputs, ti)
+       }
+
+       n, err = blockchain.ReadVarint31(r)
+       if err != nil {
+               return errors.Wrap(err, "reading number of transaction outputs")
+       }
+       for ; n > 0; n-- {
+               to := new(TxOutput)
+               err = to.readFrom(r, tx.Version)
+               if err != nil {
+                       return errors.Wrapf(err, "reading output %d", len(tx.Outputs))
+               }
+               tx.Outputs = append(tx.Outputs, to)
+       }
+
+       tx.ReferenceData, err = blockchain.ReadVarstr31(r)
+       return errors.Wrap(err, "reading transaction reference data")
+}
+
+// readCommonWitness reads the body of the common witness.
+// It does not read the enclosing extensible string.
+// The current transaction version defines no common witness fields,
+// so there is nothing to consume.
+func (tx *TxData) readCommonWitness(r *blockchain.Reader) error {
+       return nil
+}
+
+// MarshalText satisfies encoding.TextMarshaler, returning the hex
+// encoding of the transaction's canonical serialization.
+func (tx *TxData) MarshalText() ([]byte, error) {
+       var buf bytes.Buffer
+       tx.WriteTo(&buf) // error is impossible: bytes.Buffer writes never fail
+       b := make([]byte, hex.EncodedLen(buf.Len()))
+       hex.Encode(b, buf.Bytes())
+       return b, nil
+}
+
+// WriteTo writes tx to w.
+func (tx *TxData) WriteTo(w io.Writer) (int64, error) {
+       ew := errors.NewWriter(w)
+       err := tx.writeTo(ew, serRequired)
+       if err != nil {
+               return ew.Written(), ew.Err()
+       }
+       return ew.Written(), ew.Err()
+}
+
+// writeTo serializes tx to w under the given serialization flags,
+// mirroring the wire order of readFrom: flags, version, common
+// fields, common witness, inputs, outputs, reference data.
+func (tx *TxData) writeTo(w io.Writer, serflags byte) error {
+       _, err := w.Write([]byte{serflags})
+       if err != nil {
+               return errors.Wrap(err, "writing serialization flags")
+       }
+
+       _, err = blockchain.WriteVarint63(w, tx.Version)
+       if err != nil {
+               return errors.Wrap(err, "writing transaction version")
+       }
+
+       // common fields
+       _, err = blockchain.WriteExtensibleString(w, tx.CommonFieldsSuffix, func(w io.Writer) error {
+               _, err := blockchain.WriteVarint63(w, tx.MinTime)
+               if err != nil {
+                       return errors.Wrap(err, "writing transaction min time")
+               }
+               _, err = blockchain.WriteVarint63(w, tx.MaxTime)
+               return errors.Wrap(err, "writing transaction max time")
+       })
+       if err != nil {
+               return errors.Wrap(err, "writing common fields")
+       }
+
+       // common witness
+       _, err = blockchain.WriteExtensibleString(w, tx.CommonWitnessSuffix, tx.writeCommonWitness)
+       if err != nil {
+               return errors.Wrap(err, "writing common witness")
+       }
+
+       _, err = blockchain.WriteVarint31(w, uint64(len(tx.Inputs)))
+       if err != nil {
+               return errors.Wrap(err, "writing tx input count")
+       }
+       for i, ti := range tx.Inputs {
+               err = ti.writeTo(w, serflags)
+               if err != nil {
+                       return errors.Wrapf(err, "writing tx input %d", i)
+               }
+       }
+
+       _, err = blockchain.WriteVarint31(w, uint64(len(tx.Outputs)))
+       if err != nil {
+               return errors.Wrap(err, "writing tx output count")
+       }
+       for i, to := range tx.Outputs {
+               err = to.writeTo(w, serflags)
+               if err != nil {
+                       return errors.Wrapf(err, "writing tx output %d", i)
+               }
+       }
+
+       // Reference data is written in full or as a hash depending on
+       // the SerMetadata flag.
+       return writeRefData(w, tx.ReferenceData, serflags)
+}
+
+// writeCommonWitness writes the body of the common witness.
+// It does not write the enclosing extensible string.
+func (tx *TxData) writeCommonWitness(w io.Writer) error {
+       // Future protocol versions may add fields here.
+       return nil
+}
+
+func writeRefData(w io.Writer, data []byte, serflags byte) error {
+       if serflags&SerMetadata != 0 {
+               _, err := blockchain.WriteVarstr31(w, data)
+               return err
+       }
+       return writeFastHash(w, data)
+}
+
+// writeFastHash writes the SHA3-256 hash of d as a varstring.
+// Empty input is written as an empty varstring (0 bytes) rather than
+// the 32-byte hash of the empty string, so "no data" remains
+// distinguishable on the wire.
+func writeFastHash(w io.Writer, d []byte) error {
+       if len(d) == 0 {
+               _, err := blockchain.WriteVarstr31(w, nil)
+               return err
+       }
+       var h [32]byte
+       sha3pool.Sum256(h[:], d)
+       _, err := blockchain.WriteVarstr31(w, h[:])
+       return err
+}
diff --git a/protocol/bc/legacy/transaction_test.go b/protocol/bc/legacy/transaction_test.go
new file mode 100644 (file)
index 0000000..505d073
--- /dev/null
@@ -0,0 +1,396 @@
+package legacy
+
+import (
+       "bytes"
+       "encoding/hex"
+       "encoding/json"
+       "io/ioutil"
+       "strings"
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+
+       "chain/errors"
+       "chain/protocol/bc"
+       "chain/testutil"
+)
+
+// TestTransactionTrailingGarbage checks that a valid serialized tx
+// unmarshals cleanly, and that the same bytes followed by extra data
+// are rejected.
+func TestTransactionTrailingGarbage(t *testing.T) {
+       const validTxHex = `07010700d0929893b92b00000101270eac870dfde1e0feaa4fac6693dee38da2afe7f5cc83ce2b024f04a2400fd6e20a0104deadbeef027b7d0000`
+
+       var validTx Tx
+       if err := validTx.UnmarshalText([]byte(validTxHex)); err != nil {
+               t.Fatal(err)
+       }
+
+       garbledHex := validTxHex + strings.Repeat("beef", 10)
+       var invalidTx Tx
+       if err := invalidTx.UnmarshalText([]byte(garbledHex)); err == nil {
+               t.Fatal("expected error with trailing garbage but got nil")
+       }
+}
+
+// TestTransaction round-trips transactions through the canonical
+// serialization, JSON, and hex, checking serialized bytes and tx IDs
+// against golden fixtures. The hex fixtures are annotated field by
+// field; do not reformat them.
+func TestTransaction(t *testing.T) {
+       issuanceScript := []byte{1}
+       initialBlockHashHex := "03deff1d4319d67baa10a6d26c1fea9c3e8d30e33474efee1a610a9bb49d758d"
+       initialBlockHash := mustDecodeHash(initialBlockHashHex)
+
+       assetID := bc.ComputeAssetID(issuanceScript, &initialBlockHash, 1, &bc.EmptyStringHash)
+
+       cases := []struct {
+               tx   *Tx
+               hex  string
+               hash bc.Hash
+       }{
+               {
+                       tx: NewTx(TxData{
+                               Version:       1,
+                               Inputs:        nil,
+                               Outputs:       nil,
+                               MinTime:       0,
+                               MaxTime:       0,
+                               ReferenceData: nil,
+                       }),
+                       hex: ("07" + // serflags
+                               "01" + // transaction version
+                               "02" + // common fields extensible string length
+                               "00" + // common fields, mintime
+                               "00" + // common fields, maxtime
+                               "00" + // common witness extensible string length
+                               "00" + // inputs count
+                               "00" + // outputs count
+                               "00"), // reference data
+                       hash: mustDecodeHash("7ae6eef6b02fe61d35cc185405aec5f690ccb0ac291ecd6214445a1dff8fc9fd"),
+               },
+               {
+                       tx: NewTx(TxData{
+                               Version: 1,
+                               Inputs: []*TxInput{
+                                       NewIssuanceInput([]byte{10, 9, 8}, 1000000000000, []byte("input"), initialBlockHash, issuanceScript, [][]byte{[]byte{1, 2, 3}}, nil),
+                               },
+                               Outputs: []*TxOutput{
+                                       NewTxOutput(bc.AssetID{}, 1000000000000, []byte{1}, []byte("output")),
+                               },
+                               MinTime:       0,
+                               MaxTime:       0,
+                               ReferenceData: []byte("issuance"),
+                       }),
+                       hex: ("07" + // serflags
+                               "01" + // transaction version
+                               "02" + // common fields extensible string length
+                               "00" + // common fields, mintime
+                               "00" + // common fields, maxtime
+                               "00" + // common witness extensible string length
+                               "01" + // inputs count
+                               "01" + // input 0, asset version
+                               "2b" + // input 0, input commitment length prefix
+                               "00" + // input 0, input commitment, "issuance" type
+                               "03" + // input 0, input commitment, nonce length prefix
+                               "0a0908" + // input 0, input commitment, nonce
+                               assetID.String() + // input 0, input commitment, asset id
+                               "80a094a58d1d" + // input 0, input commitment, amount
+                               "05696e707574" + // input 0, reference data
+                               "29" + // input 0, issuance input witness length prefix
+                               initialBlockHashHex + // input 0, issuance input witness, initial block
+                               "00" + // input 0, issuance input witness, asset definition
+                               "01" + // input 0, issuance input witness, vm version
+                               "01" + // input 0, issuance input witness, issuance program length prefix
+                               "01" + // input 0, issuance input witness, issuance program
+                               "01" + // input 0, issuance input witness, arguments count
+                               "03" + // input 0, issuance input witness, argument 0 length prefix
+                               "010203" + // input 0, issuance input witness, argument 0
+                               "01" + // outputs count
+                               "01" + // output 0, asset version
+                               "29" + // output 0, output commitment length
+                               "0000000000000000000000000000000000000000000000000000000000000000" + // output 0, output commitment, asset id
+                               "80a094a58d1d" + // output 0, output commitment, amount
+                               "01" + // output 0, output commitment, vm version
+                               "0101" + // output 0, output commitment, control program
+                               "066f7574707574" + // output 0, reference data
+                               "00" + // output 0, output witness
+                               "0869737375616e6365"), // reference data
+                       hash: mustDecodeHash("cd4669d5363374f8661621273501c23e613fc98b0fab9d5d858f30e16ccd24ce"),
+               },
+               {
+                       tx: NewTx(TxData{
+                               Version: 1,
+                               Inputs: []*TxInput{
+                                       NewSpendInput(nil, mustDecodeHash("dd385f6fe25d91d8c1bd0fa58951ad56b0c5229dcc01f61d9f9e8b9eb92d3292"), bc.AssetID{}, 1000000000000, 1, []byte{1}, bc.Hash{}, []byte("input")),
+                               },
+                               Outputs: []*TxOutput{
+                                       NewTxOutput(assetID, 600000000000, []byte{1}, nil),
+                                       NewTxOutput(assetID, 400000000000, []byte{2}, nil),
+                               },
+                               MinTime:       1492590000,
+                               MaxTime:       1492590591,
+                               ReferenceData: []byte("distribution"),
+                       }),
+                       hex: ("07" + // serflags
+                               "01" + // transaction version
+                               "0a" + // common fields extensible string length
+
+                               "b0bbdcc705" + // common fields, mintime
+                               "ffbfdcc705" + // common fields, maxtime
+                               "00" + // common witness extensible string length
+                               "01" + // inputs count
+                               "01" + // input 0, asset version
+                               "6c" + // input 0, input commitment length prefix
+                               "01" + // input 0, input commitment, "spend" type+
+                               "6a" + // input 0, spend input commitment, spend commitment length prefix
+                               "dd385f6fe25d91d8c1bd0fa58951ad56b0c5229dcc01f61d9f9e8b9eb92d3292" + // input 0, spend input commitment, spend commitment, source ID
+                               "0000000000000000000000000000000000000000000000000000000000000000" + // input 0, spend input commitment, spend commitment, asset id
+                               "80a094a58d1d" + // input 0, spend input commitment, spend commitment, amount
+                               "01" + // input 0, spend input commitment, spend commitment, source position
+                               "01" + // input 0, spend input commitment, spend commitment, vm version
+                               "0101" + // input 0, spend input commitment, spend commitment, control program
+                               "0000000000000000000000000000000000000000000000000000000000000000" + // input 0, spend input commitment, spend commitment, reference data hash
+                               "05696e707574" + // input 0, reference data
+                               "01" + // input 0, input witness length prefix
+                               "00" + // input 0, input witness, number of args
+                               "02" + // outputs count
+                               "01" + // output 0, asset version
+                               "29" + // output 0, output commitment length
+                               "a9b2b6c5394888ab5396f583ae484b8459486b14268e2bef1b637440335eb6c1" + // output 0, output commitment, asset id
+                               "80e0a596bb11" + // output 0, output commitment, amount
+                               "01" + // output 0, output commitment, vm version
+                               "0101" + // output 0, output commitment, control program
+                               "00" + // output 0, reference data
+                               "00" + // output 0, output witness
+                               "01" + // output 1, asset version
+                               "29" + // output 1, output commitment length
+                               "a9b2b6c5394888ab5396f583ae484b8459486b14268e2bef1b637440335eb6c1" + // output 1, output commitment, asset id
+                               "80c0ee8ed20b" + // output 1, output commitment, amount
+                               "01" + // output 1, vm version
+                               "0102" + // output 1, output commitment, control program
+                               "00" + // output 1, reference data
+                               "00" + // output 1, output witness
+                               "0c646973747269627574696f6e"), // reference data
+                       hash: mustDecodeHash("c328ad4278045b4c50e8af7e7d0df198e7d9436d2b5de35df1339f13a1192331"),
+               },
+
+               //07
+               //01
+               //0a
+               //b0bbdcc705
+               //ffbfdcc705
+               //00
+               //01
+               //01
+               //4b
+               //01
+               //dd385f6fe25d91d8c1bd0fa58951ad56b0c5229dcc01f61d9f9e8b9eb92d3292
+               //29
+               //0000000000000000000000000000000000000000000000000000000000000000
+               //80a094a58d1d
+               //01
+               //0101
+               //05696e707574
+               //01
+               //00
+               //02
+               //01
+               //29
+               //a9b2b6c5394888ab5396f583ae484b8459486b14268e2bef1b637440335eb6c1
+               //80e0a596bb11
+               //01
+               //0101
+               //00
+               //00
+               //01
+               //29
+               //a9b2b6c5394888ab5396f583ae484b8459486b14268e2bef1b637440335eb6c1
+               //80c0ee8ed20b
+               //01
+               //0102
+               //00
+               //00
+               //0c646973747269627574696f6e
+
+       }
+       for i, test := range cases {
+               got := serialize(t, test.tx)
+               want, _ := hex.DecodeString(test.hex)
+               if !bytes.Equal(got, want) {
+                       t.Errorf("test %d: bytes = %x want %x", i, got, want)
+               }
+               if test.tx.ID != test.hash {
+                       t.Errorf("test %d: hash = %x want %x", i, test.tx.ID.Bytes(), test.hash.Bytes())
+               }
+
+               // Round-trip through JSON and through the raw hex encoding;
+               // both must reproduce the original TxData exactly.
+               txJSON, err := json.Marshal(test.tx)
+               if err != nil {
+                       t.Errorf("test %d: error marshaling tx to json: %s", i, err)
+               }
+               var txFromJSON Tx
+               if err := json.Unmarshal(txJSON, &txFromJSON); err != nil {
+                       t.Errorf("test %d: error unmarshaling tx from json: %s", i, err)
+               }
+               if !testutil.DeepEqual(test.tx.TxData, txFromJSON.TxData) {
+                       t.Errorf("test %d: legacy.TxData -> json -> legacy.TxData: got:\n%s\nwant:\n%s", i, spew.Sdump(txFromJSON.TxData), spew.Sdump(test.tx.TxData))
+               }
+
+               tx1 := new(TxData)
+               if err := tx1.UnmarshalText([]byte(test.hex)); err != nil {
+                       t.Errorf("test %d: unexpected err %v", i, err)
+               }
+               if !testutil.DeepEqual(*tx1, test.tx.TxData) {
+                       t.Errorf("test %d: tx1 is:\n%swant:\n%s", i, spew.Sdump(*tx1), spew.Sdump(test.tx.TxData))
+               }
+       }
+}
+
+// TestHasIssuance covers issuance-only, mixed, spend-only, and empty
+// input sets.
+func TestHasIssuance(t *testing.T) {
+       cases := []struct {
+               tx   *TxData
+               want bool
+       }{{
+               tx: &TxData{
+                       Inputs: []*TxInput{NewIssuanceInput(nil, 0, nil, bc.Hash{}, nil, nil, nil)},
+               },
+               want: true,
+       }, {
+               tx: &TxData{
+                       Inputs: []*TxInput{
+                               NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil),
+                               NewIssuanceInput(nil, 0, nil, bc.Hash{}, nil, nil, nil),
+                       },
+               },
+               want: true,
+       }, {
+               tx: &TxData{
+                       Inputs: []*TxInput{
+                               NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil),
+                       },
+               },
+               want: false,
+       }, {
+               tx:   &TxData{},
+               want: false,
+       }}
+
+       for _, c := range cases {
+               got := c.tx.HasIssuance()
+               if got != c.want {
+                       t.Errorf("HasIssuance(%+v) = %v want %v", c.tx, got, c.want)
+               }
+       }
+}
+
+// TestInvalidIssuance feeds a serialized issuance whose declared
+// asset ID (all zeroes) does not match its issuance parameters and
+// expects deserialization to fail with errBadAssetID.
+func TestInvalidIssuance(t *testing.T) {
+       hex := ("07" + // serflags
+               "01" + // transaction version
+               "02" + // common fields extensible string length
+               "00" + // common fields, mintime
+               "00" + // common fields, maxtime
+               "00" + // common witness extensible string length
+               "01" + // inputs count
+               "01" + // input 0, asset version
+               "2b" + // input 0, input commitment length prefix
+               "00" + // input 0, input commitment, "issuance" type
+               "03" + // input 0, input commitment, nonce length prefix
+               "0a0908" + // input 0, input commitment, nonce
+               "0000000000000000000000000000000000000000000000000000000000000000" + // input 0, input commitment, WRONG asset id
+               "80a094a58d1d" + // input 0, input commitment, amount
+               "05696e707574" + // input 0, reference data
+               "29" + // input 0, issuance input witness length prefix
+               "03deff1d4319d67baa10a6d26c1fea9c3e8d30e33474efee1a610a9bb49d758d" + // input 0, issuance input witness, initial block
+               "00" + // input 0, issuance input witness, asset definition
+               "01" + // input 0, issuance input witness, vm version
+               "01" + // input 0, issuance input witness, issuance program length prefix
+               "01" + // input 0, issuance input witness, issuance program
+               "01" + // input 0, issuance input witness, arguments count
+               "03" + // input 0, issuance input witness, argument 0 length prefix
+               "010203" + // input 0, issuance input witness, argument 0
+               "01" + // outputs count
+               "01" + // output 0, asset version
+               "29" + // output 0, output commitment length
+               "0000000000000000000000000000000000000000000000000000000000000000" + // output 0, output commitment, asset id
+               "80a094a58d1d" + // output 0, output commitment, amount
+               "01" + // output 0, output commitment, vm version
+               "0101" + // output 0, output commitment, control program
+               "066f7574707574" + // output 0, reference data
+               "00" + // output 0, output witness
+               "0869737375616e6365")
+       tx := new(TxData)
+       err := tx.UnmarshalText([]byte(hex))
+       if errors.Root(err) != errBadAssetID {
+               t.Errorf("want errBadAssetID, got %v", err)
+       }
+}
+
+// NOTE(review): the True/False suffixes on these benchmark names look
+// vestigial; the variants differ only in the serflags argument
+// (0 vs serRequired).
+
+// BenchmarkTxWriteToTrue serializes an empty tx with serflags 0.
+func BenchmarkTxWriteToTrue(b *testing.B) {
+       tx := &Tx{}
+       for i := 0; i < b.N; i++ {
+               tx.writeTo(ioutil.Discard, 0)
+       }
+}
+
+// BenchmarkTxWriteToFalse serializes an empty tx with serRequired flags.
+func BenchmarkTxWriteToFalse(b *testing.B) {
+       tx := &Tx{}
+       for i := 0; i < b.N; i++ {
+               tx.writeTo(ioutil.Discard, serRequired)
+       }
+}
+
+// BenchmarkTxWriteToTrue200 serializes a 200-input/200-output tx with
+// serflags 0.
+func BenchmarkTxWriteToTrue200(b *testing.B) {
+       tx := &Tx{}
+       for i := 0; i < 200; i++ {
+               tx.Inputs = append(tx.Inputs, NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil))
+               tx.Outputs = append(tx.Outputs, NewTxOutput(bc.AssetID{}, 0, nil, nil))
+       }
+       for i := 0; i < b.N; i++ {
+               tx.writeTo(ioutil.Discard, 0)
+       }
+}
+
+// BenchmarkTxWriteToFalse200 serializes a 200-input/200-output tx with
+// serRequired flags.
+func BenchmarkTxWriteToFalse200(b *testing.B) {
+       tx := &Tx{}
+       for i := 0; i < 200; i++ {
+               tx.Inputs = append(tx.Inputs, NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil))
+               tx.Outputs = append(tx.Outputs, NewTxOutput(bc.AssetID{}, 0, nil, nil))
+       }
+       for i := 0; i < b.N; i++ {
+               tx.writeTo(ioutil.Discard, serRequired)
+       }
+}
+
+// BenchmarkTxInputWriteToTrue serializes a single spend input with
+// serflags 0.
+func BenchmarkTxInputWriteToTrue(b *testing.B) {
+       input := NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil)
+       ew := errors.NewWriter(ioutil.Discard)
+       for i := 0; i < b.N; i++ {
+               input.writeTo(ew, 0)
+       }
+}
+
+// BenchmarkTxInputWriteToFalse serializes a single spend input with
+// serRequired flags.
+func BenchmarkTxInputWriteToFalse(b *testing.B) {
+       input := NewSpendInput(nil, bc.Hash{}, bc.AssetID{}, 0, 0, nil, bc.Hash{}, nil)
+       ew := errors.NewWriter(ioutil.Discard)
+       for i := 0; i < b.N; i++ {
+               input.writeTo(ew, serRequired)
+       }
+}
+
+// BenchmarkTxOutputWriteToTrue serializes a single output with
+// serflags 0.
+func BenchmarkTxOutputWriteToTrue(b *testing.B) {
+       output := NewTxOutput(bc.AssetID{}, 0, nil, nil)
+       ew := errors.NewWriter(ioutil.Discard)
+       for i := 0; i < b.N; i++ {
+               output.writeTo(ew, 0)
+       }
+}
+
+// BenchmarkTxOutputWriteToFalse serializes a single output with
+// serRequired flags.
+func BenchmarkTxOutputWriteToFalse(b *testing.B) {
+       output := NewTxOutput(bc.AssetID{}, 0, nil, nil)
+       ew := errors.NewWriter(ioutil.Discard)
+       for i := 0; i < b.N; i++ {
+               output.writeTo(ew, serRequired)
+       }
+}
+
+// BenchmarkAssetAmountWriteTo serializes a zero-valued AssetAmount.
+func BenchmarkAssetAmountWriteTo(b *testing.B) {
+       aa := bc.AssetAmount{}
+       for i := 0; i < b.N; i++ {
+               aa.WriteTo(ioutil.Discard)
+       }
+}
diff --git a/protocol/bc/legacy/tx_test.go b/protocol/bc/legacy/tx_test.go
new file mode 100644 (file)
index 0000000..957c0ae
--- /dev/null
@@ -0,0 +1,68 @@
+package legacy
+
+import (
+       "testing"
+
+       "chain/protocol/bc"
+
+       "github.com/davecgh/go-spew/spew"
+)
+
+// TestTxHashes checks MapTx output against golden tx IDs for an
+// empty transaction and for sampleTx, and that the number of mapped
+// input IDs matches the input count.
+func TestTxHashes(t *testing.T) {
+       cases := []struct {
+               txdata *TxData
+               hash   bc.Hash
+       }{
+               {
+                       txdata: &TxData{},
+                       hash:   mustDecodeHash("e367a95b0f1dafdedd86f633456c81ef6bd4f2623f0890d56417f73a18a67297"),
+               },
+               {
+                       txdata: sampleTx(),
+                       hash:   mustDecodeHash("9fad4f5024412d99d17508ef3cc66f81f1e09914a71b2641683acca87081c098"), // todo: verify this value,
+               },
+       }
+
+       for i, c := range cases {
+               txEntries := MapTx(c.txdata)
+               if len(txEntries.InputIDs) != len(c.txdata.Inputs) {
+                       t.Errorf("case %d: len(txEntries.InputIDs) = %d, want %d", i, len(txEntries.InputIDs), len(c.txdata.Inputs))
+               }
+               if c.hash != txEntries.ID {
+                       t.Errorf("case %d: got txid %x, want %x. txEntries is:\n%s", i, txEntries.ID.Bytes(), c.hash.Bytes(), spew.Sdump(txEntries))
+               }
+       }
+}
+
+// BenchmarkHashEmptyTx measures MapTx on an empty transaction.
+func BenchmarkHashEmptyTx(b *testing.B) {
+       tx := &TxData{}
+       for i := 0; i < b.N; i++ {
+               _ = MapTx(tx)
+       }
+}
+
+// BenchmarkHashNonemptyTx measures MapTx on sampleTx.
+func BenchmarkHashNonemptyTx(b *testing.B) {
+       tx := sampleTx()
+       for i := 0; i < b.N; i++ {
+               _ = MapTx(tx)
+       }
+}
+
+// sampleTx returns a fixed two-input, two-output spend transaction
+// used by tests and benchmarks in this package.
+func sampleTx() *TxData {
+       initialBlockHash := mustDecodeHash("03deff1d4319d67baa10a6d26c1fea9c3e8d30e33474efee1a610a9bb49d758d")
+       assetID := bc.ComputeAssetID([]byte{1}, &initialBlockHash, 1, &bc.EmptyStringHash)
+       return &TxData{
+               Version: 1,
+               Inputs: []*TxInput{
+                       NewSpendInput(nil, mustDecodeHash("dd385f6fe25d91d8c1bd0fa58951ad56b0c5229dcc01f61d9f9e8b9eb92d3292"), assetID, 1000000000000, 1, []byte{1}, bc.Hash{}, []byte("input")),
+                       NewSpendInput(nil, bc.NewHash([32]byte{0x11}), assetID, 1, 1, []byte{2}, bc.Hash{}, []byte("input2")),
+               },
+               Outputs: []*TxOutput{
+                       NewTxOutput(assetID, 600000000000, []byte{1}, nil),
+                       NewTxOutput(assetID, 400000000000, []byte{2}, nil),
+               },
+               MinTime:       1492590000,
+               MaxTime:       1492590591,
+               ReferenceData: []byte("distribution"),
+       }
+}
diff --git a/protocol/bc/legacy/txinput.go b/protocol/bc/legacy/txinput.go
new file mode 100644 (file)
index 0000000..4b62125
--- /dev/null
@@ -0,0 +1,309 @@
+package legacy
+
+import (
+       "fmt"
+       "io"
+
+       "chain/encoding/blockchain"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+type (
+       // TxInput is a legacy (pre-entry) transaction input. The concrete
+       // input payload lives in the embedded TypedInput, which is either
+       // an *IssuanceInput or a *SpendInput.
+       TxInput struct {
+               AssetVersion  uint64
+               ReferenceData []byte
+               TypedInput
+
+               // Unconsumed suffixes of the commitment and witness extensible
+               // strings.
+               CommitmentSuffix []byte
+               WitnessSuffix    []byte
+       }
+
+       // TypedInput is the interface satisfied by the concrete input
+       // types embedded in TxInput.
+       TypedInput interface {
+               IsIssuance() bool
+       }
+)
+
+// errBadAssetID is returned when the asset ID read from the input
+// commitment does not match the one recomputed from the issuance
+// witness parameters.
+var errBadAssetID = errors.New("asset ID does not match other issuance parameters")
+
+// AssetAmount returns the asset and amount of this input, for both
+// issuances and spends.
+// NOTE(review): the final type assertion is unchecked — panics if
+// TypedInput is neither *IssuanceInput nor *SpendInput.
+func (t *TxInput) AssetAmount() bc.AssetAmount {
+       if ii, ok := t.TypedInput.(*IssuanceInput); ok {
+               assetID := ii.AssetID()
+               return bc.AssetAmount{
+                       AssetId: &assetID,
+                       Amount:  ii.Amount,
+               }
+       }
+       si := t.TypedInput.(*SpendInput)
+       return si.AssetAmount
+}
+
+// AssetID returns the asset ID of this input. Like AssetAmount, it
+// panics if TypedInput is neither *IssuanceInput nor *SpendInput.
+func (t *TxInput) AssetID() bc.AssetID {
+       if ii, ok := t.TypedInput.(*IssuanceInput); ok {
+               return ii.AssetID()
+       }
+       si := t.TypedInput.(*SpendInput)
+       return *si.AssetId
+}
+
+// Amount returns the amount of this input. Like AssetAmount, it
+// panics if TypedInput is neither *IssuanceInput nor *SpendInput.
+func (t *TxInput) Amount() uint64 {
+       if ii, ok := t.TypedInput.(*IssuanceInput); ok {
+               return ii.Amount
+       }
+       si := t.TypedInput.(*SpendInput)
+       return si.Amount
+}
+
+// ControlProgram returns the control program of a spend input, or nil
+// if this input is not a spend.
+func (t *TxInput) ControlProgram() []byte {
+       if si, ok := t.TypedInput.(*SpendInput); ok {
+               return si.ControlProgram
+       }
+       return nil
+}
+
+// IssuanceProgram returns the issuance program of an issuance input,
+// or nil if this input is not an issuance.
+func (t *TxInput) IssuanceProgram() []byte {
+       if ii, ok := t.TypedInput.(*IssuanceInput); ok {
+               return ii.IssuanceProgram
+       }
+       return nil
+}
+
+// Arguments returns the witness arguments of either input type, or nil
+// for any other TypedInput.
+func (t *TxInput) Arguments() [][]byte {
+       switch inp := t.TypedInput.(type) {
+       case *IssuanceInput:
+               return inp.Arguments
+       case *SpendInput:
+               return inp.Arguments
+       }
+       return nil
+}
+
+// SetArguments replaces the witness arguments of either input type; it
+// is a no-op for any other TypedInput.
+func (t *TxInput) SetArguments(args [][]byte) {
+       switch inp := t.TypedInput.(type) {
+       case *IssuanceInput:
+               inp.Arguments = args
+       case *SpendInput:
+               inp.Arguments = args
+       }
+}
+
+// readFrom parses a serialized TxInput from r: asset version, then the
+// input-commitment extensible string (a type byte, 0 = issuance or
+// 1 = spend, plus type-specific fields), the reference data, and the
+// witness extensible string. Unparsed trailing bytes of the two
+// extensible strings are retained in CommitmentSuffix and
+// WitnessSuffix so the input re-serializes byte-for-byte.
+func (t *TxInput) readFrom(r *blockchain.Reader) (err error) {
+       t.AssetVersion, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return err
+       }
+
+       var (
+               ii      *IssuanceInput
+               si      *SpendInput
+               assetID bc.AssetID
+       )
+
+       t.CommitmentSuffix, err = blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) error {
+               // Commitment contents are defined only for asset version 1;
+               // for other versions the whole string lands in the suffix.
+               if t.AssetVersion != 1 {
+                       return nil
+               }
+               var icType [1]byte
+               _, err = io.ReadFull(r, icType[:])
+               if err != nil {
+                       return errors.Wrap(err, "reading input commitment type")
+               }
+               switch icType[0] {
+               case 0:
+                       // Issuance commitment: nonce, asset ID, amount.
+                       ii = new(IssuanceInput)
+
+                       ii.Nonce, err = blockchain.ReadVarstr31(r)
+                       if err != nil {
+                               return err
+                       }
+                       _, err = assetID.ReadFrom(r)
+                       if err != nil {
+                               return err
+                       }
+                       ii.Amount, err = blockchain.ReadVarint63(r)
+                       if err != nil {
+                               return err
+                       }
+
+               case 1:
+                       // Spend commitment: carries its own unconsumed suffix.
+                       si = new(SpendInput)
+                       si.SpendCommitmentSuffix, err = si.SpendCommitment.readFrom(r, 1)
+                       if err != nil {
+                               return err
+                       }
+
+               default:
+                       return fmt.Errorf("unsupported input type %d", icType[0])
+               }
+               return nil
+       })
+       if err != nil {
+               return err
+       }
+
+       t.ReferenceData, err = blockchain.ReadVarstr31(r)
+       if err != nil {
+               return err
+       }
+
+       t.WitnessSuffix, err = blockchain.ReadExtensibleString(r, func(r *blockchain.Reader) error {
+               // TODO(bobg): test that serialization flags include SerWitness, when we relax the serflags-must-be-0x7 rule
+               if t.AssetVersion != 1 {
+                       return nil
+               }
+
+               if ii != nil {
+                       // read IssuanceInput witness
+                       _, err = ii.InitialBlock.ReadFrom(r)
+                       if err != nil {
+                               return err
+                       }
+
+                       ii.AssetDefinition, err = blockchain.ReadVarstr31(r)
+                       if err != nil {
+                               return err
+                       }
+
+                       ii.VMVersion, err = blockchain.ReadVarint63(r)
+                       if err != nil {
+                               return err
+                       }
+
+                       ii.IssuanceProgram, err = blockchain.ReadVarstr31(r)
+                       if err != nil {
+                               return err
+                       }
+
+                       // The witness parameters must recompute the same asset
+                       // ID that the commitment declared above.
+                       if ii.AssetID() != assetID {
+                               return errBadAssetID
+                       }
+               }
+               // Both input types end with a list of witness arguments.
+               args, err := blockchain.ReadVarstrList(r)
+               if err != nil {
+                       return err
+               }
+               if ii != nil {
+                       ii.Arguments = args
+               } else if si != nil {
+                       si.Arguments = args
+               }
+               return nil
+       })
+       if err != nil {
+               return err
+       }
+       // Install whichever concrete input the commitment described.
+       if ii != nil {
+               t.TypedInput = ii
+       } else if si != nil {
+               t.TypedInput = si
+       }
+       return nil
+}
+
+// writeTo serializes the input: asset version, commitment extensible
+// string, reference data, and — only when serflags includes
+// SerWitness — the witness extensible string.
+func (t *TxInput) writeTo(w io.Writer, serflags uint8) error {
+       _, err := blockchain.WriteVarint63(w, t.AssetVersion)
+       if err != nil {
+               return errors.Wrap(err, "writing asset version")
+       }
+
+       _, err = blockchain.WriteExtensibleString(w, t.CommitmentSuffix, func(w io.Writer) error {
+               return t.WriteInputCommitment(w, serflags)
+       })
+
+       if err != nil {
+               return errors.Wrap(err, "writing input commitment")
+       }
+
+       _, err = blockchain.WriteVarstr31(w, t.ReferenceData)
+       if err != nil {
+               return errors.Wrap(err, "writing reference data")
+       }
+
+       if serflags&SerWitness != 0 {
+               _, err = blockchain.WriteExtensibleString(w, t.WitnessSuffix, t.writeInputWitness)
+               if err != nil {
+                       return errors.Wrap(err, "writing input witness")
+               }
+       }
+
+       return nil
+}
+
+// WriteInputCommitment writes the body of the input-commitment
+// extensible string: a type byte (0 = issuance, 1 = spend) followed by
+// the type-specific commitment. For spends, serflags&SerPrevout picks
+// between the full prevout commitment and just its hash. Nothing is
+// written for asset versions other than 1.
+func (t *TxInput) WriteInputCommitment(w io.Writer, serflags uint8) error {
+       if t.AssetVersion != 1 {
+               return nil
+       }
+       switch inp := t.TypedInput.(type) {
+       case *IssuanceInput:
+               _, err := w.Write([]byte{0}) // issuance type
+               if err != nil {
+                       return err
+               }
+               _, err = blockchain.WriteVarstr31(w, inp.Nonce)
+               if err != nil {
+                       return err
+               }
+               assetID := t.AssetID()
+               _, err = assetID.WriteTo(w)
+               if err != nil {
+                       return err
+               }
+               _, err = blockchain.WriteVarint63(w, inp.Amount)
+               return err
+
+       case *SpendInput:
+               _, err := w.Write([]byte{1}) // spend type
+               if err != nil {
+                       return err
+               }
+               if serflags&SerPrevout != 0 {
+                       err = inp.SpendCommitment.writeExtensibleString(w, inp.SpendCommitmentSuffix, t.AssetVersion)
+               } else {
+                       prevouthash := inp.SpendCommitment.Hash(inp.SpendCommitmentSuffix, t.AssetVersion)
+                       _, err = prevouthash.WriteTo(w)
+               }
+               return err
+       }
+       return nil
+}
+
+// writeInputWitness writes the body of the witness extensible string
+// for asset version 1: issuance metadata plus arguments for issuances,
+// or just the argument list for spends. It mirrors the witness half of
+// readFrom.
+func (t *TxInput) writeInputWitness(w io.Writer) error {
+       if t.AssetVersion != 1 {
+               return nil
+       }
+       switch inp := t.TypedInput.(type) {
+       case *IssuanceInput:
+               _, err := inp.InitialBlock.WriteTo(w)
+               if err != nil {
+                       return err
+               }
+               _, err = blockchain.WriteVarstr31(w, inp.AssetDefinition)
+               if err != nil {
+                       return err
+               }
+               _, err = blockchain.WriteVarint63(w, inp.VMVersion)
+               if err != nil {
+                       return err
+               }
+               _, err = blockchain.WriteVarstr31(w, inp.IssuanceProgram)
+               if err != nil {
+                       return err
+               }
+               _, err = blockchain.WriteVarstrList(w, inp.Arguments)
+               return err
+
+       case *SpendInput:
+               _, err := blockchain.WriteVarstrList(w, inp.Arguments)
+               return err
+       }
+       return nil
+}
+
+// SpentOutputID returns the ID of the output consumed by this input.
+// For non-spend inputs it returns a zero hash and a nil error.
+func (t *TxInput) SpentOutputID() (o bc.Hash, err error) {
+       if si, ok := t.TypedInput.(*SpendInput); ok {
+               o, err = ComputeOutputID(&si.SpendCommitment)
+       }
+       return o, err
+}
diff --git a/protocol/bc/legacy/txoutput.go b/protocol/bc/legacy/txoutput.go
new file mode 100644 (file)
index 0000000..1b73074
--- /dev/null
@@ -0,0 +1,111 @@
+package legacy
+
+import (
+       "io"
+
+       "chain/encoding/blockchain"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+// TODO(bobg): Review serialization/deserialization logic for
+// assetVersions other than 1.
+
+// TxOutput is a legacy (pre-entry) transaction output: an asset
+// version plus an embedded OutputCommitment and opaque reference data.
+type TxOutput struct {
+       AssetVersion uint64
+       OutputCommitment
+
+       // Unconsumed suffixes of the commitment and witness extensible strings.
+       CommitmentSuffix []byte
+       WitnessSuffix    []byte
+
+       ReferenceData []byte
+}
+
+// NewTxOutput creates an asset-version-1, VM-version-1 output paying
+// the given amount of assetID to controlProgram, with optional
+// reference data.
+func NewTxOutput(assetID bc.AssetID, amount uint64, controlProgram, referenceData []byte) *TxOutput {
+       return &TxOutput{
+               AssetVersion: 1,
+               OutputCommitment: OutputCommitment{
+                       AssetAmount: bc.AssetAmount{
+                               AssetId: &assetID,
+                               Amount:  amount,
+                       },
+                       VMVersion:      1,
+                       ControlProgram: controlProgram,
+               },
+               ReferenceData: referenceData,
+       }
+}
+
+// readFrom parses a serialized TxOutput: asset version, output
+// commitment, reference data, and a (discarded) output witness.
+// NOTE(review): the txVersion parameter is currently unused — confirm
+// whether version-dependent parsing was intended here.
+func (to *TxOutput) readFrom(r *blockchain.Reader, txVersion uint64) (err error) {
+       to.AssetVersion, err = blockchain.ReadVarint63(r)
+       if err != nil {
+               return errors.Wrap(err, "reading asset version")
+       }
+
+       to.CommitmentSuffix, err = to.OutputCommitment.readFrom(r, to.AssetVersion)
+       if err != nil {
+               return errors.Wrap(err, "reading output commitment")
+       }
+
+       to.ReferenceData, err = blockchain.ReadVarstr31(r)
+       if err != nil {
+               return errors.Wrap(err, "reading reference data")
+       }
+
+       // read and ignore the (empty) output witness
+       _, err = blockchain.ReadVarstr31(r)
+
+       return errors.Wrap(err, "reading output witness")
+}
+
+// writeTo serializes the output: asset version, commitment, reference
+// data (serflags is forwarded to writeRefData, defined elsewhere, to
+// choose its form), and an empty v1 witness.
+func (to *TxOutput) writeTo(w io.Writer, serflags byte) error {
+       _, err := blockchain.WriteVarint63(w, to.AssetVersion)
+       if err != nil {
+               return errors.Wrap(err, "writing asset version")
+       }
+
+       err = to.WriteCommitment(w)
+       if err != nil {
+               return errors.Wrap(err, "writing output commitment")
+       }
+
+       err = writeRefData(w, to.ReferenceData, serflags)
+       if err != nil {
+               return errors.Wrap(err, "writing reference data")
+       }
+
+       // write witness (empty in v1)
+       _, err = blockchain.WriteVarstr31(w, nil)
+       if err != nil {
+               return errors.Wrap(err, "writing witness")
+       }
+       return nil
+}
+
+// WriteCommitment writes the output commitment (including any
+// preserved suffix) as an extensible string.
+func (to *TxOutput) WriteCommitment(w io.Writer) error {
+       return to.OutputCommitment.writeExtensibleString(w, to.CommitmentSuffix, to.AssetVersion)
+}
+
+// CommitmentHash returns the hash of the output commitment, including
+// any preserved suffix bytes.
+func (to *TxOutput) CommitmentHash() bc.Hash {
+       return to.OutputCommitment.Hash(to.CommitmentSuffix, to.AssetVersion)
+}
+
+// ComputeOutputID assembles an output entry given a spend commitment
+// and computes and returns its corresponding entry ID.
+// A panic carrying an error value during entry hashing is recovered
+// and returned as err; panics with non-error values propagate.
+func ComputeOutputID(sc *SpendCommitment) (h bc.Hash, err error) {
+       defer func() {
+               if r, ok := recover().(error); ok {
+                       err = r
+               }
+       }()
+       src := &bc.ValueSource{
+               Ref:      &sc.SourceID,
+               Value:    &sc.AssetAmount,
+               Position: sc.SourcePosition,
+       }
+       // Ordinal 0 is used here: only the source, program and refdata
+       // contribute to the commitment-derived entry ID.
+       o := bc.NewOutput(src, &bc.Program{VmVersion: sc.VMVersion, Code: sc.ControlProgram}, &sc.RefDataHash, 0)
+
+       h = bc.EntryID(o)
+       return h, nil
+}
diff --git a/protocol/bc/merkle.go b/protocol/bc/merkle.go
new file mode 100644 (file)
index 0000000..c646a30
--- /dev/null
@@ -0,0 +1,64 @@
+package bc
+
+import (
+       "math"
+
+       "chain/crypto/sha3pool"
+)
+
+// Domain-separation prefixes: leaf and interior nodes are hashed with
+// distinct first bytes so one kind of node cannot be reinterpreted as
+// the other.
+var (
+       leafPrefix     = []byte{0x00}
+       interiorPrefix = []byte{0x01}
+)
+
+// MerkleRoot creates a merkle tree from a slice of transactions
+// and returns the root hash of the tree.
+// The tree is built recursively, splitting at the largest power of two
+// strictly less than the slice length, so it is generally unbalanced.
+// NOTE(review): no code path currently returns a non-nil error.
+func MerkleRoot(transactions []*Tx) (root Hash, err error) {
+       switch {
+       case len(transactions) == 0:
+               return EmptyStringHash, nil
+
+       case len(transactions) == 1:
+               // Leaf: hash the 0x00 prefix followed by the tx ID.
+               h := sha3pool.Get256()
+               defer sha3pool.Put256(h)
+
+               h.Write(leafPrefix)
+               transactions[0].ID.WriteTo(h)
+               root.ReadFrom(h)
+               return root, nil
+
+       default:
+               k := prevPowerOfTwo(len(transactions))
+               left, err := MerkleRoot(transactions[:k])
+               if err != nil {
+                       return root, err
+               }
+
+               right, err := MerkleRoot(transactions[k:])
+               if err != nil {
+                       return root, err
+               }
+
+               // Interior node: hash the 0x01 prefix, then both children.
+               h := sha3pool.Get256()
+               defer sha3pool.Put256(h)
+               h.Write(interiorPrefix)
+               left.WriteTo(h)
+               right.WriteTo(h)
+               root.ReadFrom(h)
+               return root, nil
+       }
+}
+
+// prevPowerOfTwo returns the largest power of two that is smaller than a given number.
+// In other words, for some input n, the prevPowerOfTwo k is a power of two such that
+// k < n <= 2k. This is a helper function used during the calculation of a merkle tree.
+// NOTE(review): prevPowerOfTwo(1) returns 0; MerkleRoot only calls it
+// with n >= 2, where the result is always >= 1.
+func prevPowerOfTwo(n int) int {
+       // If the number is a power of two, divide it by 2 and return.
+       if n&(n-1) == 0 {
+               return n / 2
+       }
+
+       // Otherwise, find the previous PoT.
+       exponent := uint(math.Log2(float64(n)))
+       return 1 << exponent // 2^exponent
+}
diff --git a/protocol/bc/merkle_test.go b/protocol/bc/merkle_test.go
new file mode 100644 (file)
index 0000000..584abce
--- /dev/null
@@ -0,0 +1,148 @@
+package bc_test
+
+import (
+       "testing"
+       "time"
+
+       . "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/vm"
+)
+
+// TestMerkleRoot checks MerkleRoot against fixed expected roots. The
+// last two cases share the same expected root: witness (argument) data
+// must not influence the transaction IDs that feed the tree.
+func TestMerkleRoot(t *testing.T) {
+       cases := []struct {
+               witnesses [][][]byte
+               want      Hash
+       }{{
+               witnesses: [][][]byte{
+                       [][]byte{
+                               {1},
+                               []byte("00000"),
+                       },
+               },
+               want: mustDecodeHash("77eae4222f60bfd74c07994d700161d0b831ed723037952b9c7ee98ed8766977"),
+       }, {
+               witnesses: [][][]byte{
+                       [][]byte{
+                               {1},
+                               []byte("000000"),
+                       },
+                       [][]byte{
+                               {1},
+                               []byte("111111"),
+                       },
+               },
+               want: mustDecodeHash("526737fcca853f5ad352081c5a7341aca4ee05b09a002c8600e26a06df02aa3b"),
+       }, {
+               witnesses: [][][]byte{
+                       [][]byte{
+                               {1},
+                               []byte("000000"),
+                       },
+                       [][]byte{
+                               {2},
+                               []byte("111111"),
+                               []byte("222222"),
+                       },
+               },
+               want: mustDecodeHash("526737fcca853f5ad352081c5a7341aca4ee05b09a002c8600e26a06df02aa3b"),
+       }}
+
+       for _, c := range cases {
+               // Build one single-spend tx per witness set.
+               var txs []*Tx
+               for _, wit := range c.witnesses {
+                       txs = append(txs, legacy.NewTx(legacy.TxData{
+                               Inputs: []*legacy.TxInput{
+                                       &legacy.TxInput{
+                                               AssetVersion: 1,
+                                               TypedInput: &legacy.SpendInput{
+                                                       Arguments: wit,
+                                               },
+                                       },
+                               },
+                       }).Tx)
+               }
+               got, err := MerkleRoot(txs)
+               if err != nil {
+                       t.Fatalf("unexpected error %s", err)
+               }
+               if got != c.want {
+                       t.Log("witnesses", c.witnesses)
+                       t.Errorf("got merkle root = %x want %x", got.Bytes(), c.want.Bytes())
+               }
+       }
+}
+
+// TestDuplicateLeaves ensures the root of an unbalanced tree cannot be
+// forged by a balanced tree that repeats some of its leaves (the
+// classic duplicate-leaf merkle-tree attack).
+func TestDuplicateLeaves(t *testing.T) {
+       var initialBlockHash Hash
+       trueProg := []byte{byte(vm.OP_TRUE)}
+       assetID := ComputeAssetID(trueProg, &initialBlockHash, 1, &EmptyStringHash)
+       txs := make([]*Tx, 6)
+       for i := uint64(0); i < 6; i++ {
+               // Distinct nonces (current time) and amounts make each tx unique.
+               now := []byte(time.Now().String())
+               txs[i] = legacy.NewTx(legacy.TxData{
+                       Version: 1,
+                       Inputs:  []*legacy.TxInput{legacy.NewIssuanceInput(now, i, nil, initialBlockHash, trueProg, nil, nil)},
+                       Outputs: []*legacy.TxOutput{legacy.NewTxOutput(assetID, i, trueProg, nil)},
+               }).Tx
+       }
+
+       // first, get the root of an unbalanced tree
+       txns := []*Tx{txs[5], txs[4], txs[3], txs[2], txs[1], txs[0]}
+       root1, err := MerkleRoot(txns)
+       if err != nil {
+               t.Fatalf("unexpected error %s", err)
+       }
+
+       // now, get the root of a balanced tree that repeats leaves 0 and 1
+       txns = []*Tx{txs[5], txs[4], txs[3], txs[2], txs[1], txs[0], txs[1], txs[0]}
+       root2, err := MerkleRoot(txns)
+       if err != nil {
+               t.Fatalf("unexpected error %s", err)
+       }
+
+       if root1 == root2 {
+               t.Error("forged merkle tree by duplicating some leaves")
+       }
+}
+
+// TestAllDuplicateLeaves is the degenerate variant of
+// TestDuplicateLeaves: every leaf is the same transaction, so only the
+// tree shape distinguishes the two roots.
+func TestAllDuplicateLeaves(t *testing.T) {
+       var initialBlockHash Hash
+       trueProg := []byte{byte(vm.OP_TRUE)}
+       assetID := ComputeAssetID(trueProg, &initialBlockHash, 1, &EmptyStringHash)
+       now := []byte(time.Now().String())
+       issuanceInp := legacy.NewIssuanceInput(now, 1, nil, initialBlockHash, trueProg, nil, nil)
+
+       tx := legacy.NewTx(legacy.TxData{
+               Version: 1,
+               Inputs:  []*legacy.TxInput{issuanceInp},
+               Outputs: []*legacy.TxOutput{legacy.NewTxOutput(assetID, 1, trueProg, nil)},
+       }).Tx
+       tx1, tx2, tx3, tx4, tx5, tx6 := tx, tx, tx, tx, tx, tx
+
+       // first, get the root of an unbalanced tree
+       txs := []*Tx{tx6, tx5, tx4, tx3, tx2, tx1}
+       root1, err := MerkleRoot(txs)
+       if err != nil {
+               t.Fatalf("unexpected error %s", err)
+       }
+
+       // now, get the root of a balanced tree that repeats leaves 5 and 6
+       txs = []*Tx{tx6, tx5, tx6, tx5, tx4, tx3, tx2, tx1}
+       root2, err := MerkleRoot(txs)
+       if err != nil {
+               t.Fatalf("unexpected error %s", err)
+       }
+
+       if root1 == root2 {
+               t.Error("forged merkle tree with all duplicate leaves")
+       }
+}
+
+// mustDecodeHash parses a hex-encoded hash, panicking on failure.
+// It is intended only for test fixtures.
+func mustDecodeHash(s string) (h Hash) {
+       err := h.UnmarshalText([]byte(s))
+       if err != nil {
+               panic(err)
+       }
+       return h
+}
diff --git a/protocol/bc/mux.go b/protocol/bc/mux.go
new file mode 100644 (file)
index 0000000..415169e
--- /dev/null
@@ -0,0 +1,22 @@
+package bc
+
+import "io"
+
+// Mux splits and combines value from one or more source entries,
+// making it available to one or more destination entries. It
+// satisfies the Entry interface.
+
+// typ returns the entry type string used when hashing a Mux.
+func (Mux) typ() string { return "mux1" }
+
+// writeForHash writes the fields that contribute to the Mux entry ID.
+func (m *Mux) writeForHash(w io.Writer) {
+       mustWriteForHash(w, m.Sources)
+       mustWriteForHash(w, m.Program)
+       mustWriteForHash(w, m.ExtHash)
+}
+
+// NewMux creates a new Mux.
+func NewMux(sources []*ValueSource, program *Program) *Mux {
+       return &Mux{
+               Sources: sources,
+               Program: program,
+       }
+}
diff --git a/protocol/bc/nonce.go b/protocol/bc/nonce.go
new file mode 100644 (file)
index 0000000..bf1fda3
--- /dev/null
@@ -0,0 +1,26 @@
+package bc
+
+import "io"
+
+// Nonce contains data used, among other things, for distinguishing
+// otherwise-identical issuances (when used as those issuances'
+// "anchors"). It satisfies the Entry interface.
+
+// typ returns the entry type string used when hashing a Nonce.
+func (Nonce) typ() string { return "nonce1" }
+
+// writeForHash writes the fields that contribute to the Nonce entry ID.
+func (n *Nonce) writeForHash(w io.Writer) {
+       mustWriteForHash(w, n.Program)
+       mustWriteForHash(w, n.TimeRangeId)
+       mustWriteForHash(w, n.ExtHash)
+}
+
+// NewNonce creates a new Nonce.
+func NewNonce(p *Program, trID *Hash) *Nonce {
+       return &Nonce{
+               Program:     p,
+               TimeRangeId: trID,
+       }
+}
+
+// SetAnchored records the ID of the entry anchored by this nonce.
+func (n *Nonce) SetAnchored(id *Hash) {
+       n.WitnessAnchoredId = id
+}
diff --git a/protocol/bc/output.go b/protocol/bc/output.go
new file mode 100644 (file)
index 0000000..32d5e3e
--- /dev/null
@@ -0,0 +1,27 @@
+package bc
+
+import "io"
+
+// Output is the result of a transfer of value. The value it contains
+// may be accessed by a later Spend entry (if that entry can satisfy
+// the Output's ControlProgram). Output satisfies the Entry interface.
+//
+// (Not to be confused with the deprecated type TxOutput.)
+
+// typ returns the entry type string used when hashing an Output.
+func (Output) typ() string { return "output1" }
+
+// writeForHash writes the fields that contribute to the Output entry
+// ID (note: Ordinal is excluded).
+func (o *Output) writeForHash(w io.Writer) {
+       mustWriteForHash(w, o.Source)
+       mustWriteForHash(w, o.ControlProgram)
+       mustWriteForHash(w, o.Data)
+       mustWriteForHash(w, o.ExtHash)
+}
+
+// NewOutput creates a new Output.
+func NewOutput(source *ValueSource, controlProgram *Program, data *Hash, ordinal uint64) *Output {
+       return &Output{
+               Source:         source,
+               ControlProgram: controlProgram,
+               Data:           data,
+               Ordinal:        ordinal,
+       }
+}
diff --git a/protocol/bc/retirement.go b/protocol/bc/retirement.go
new file mode 100644 (file)
index 0000000..28b7d07
--- /dev/null
@@ -0,0 +1,23 @@
+package bc
+
+import "io"
+
+// Retirement is for the permanent removal of some value from a
+// blockchain. The value it contains can never be obtained by later
+// entries. Retirement satisfies the Entry interface.
+
+// typ returns the entry type string used when hashing a Retirement.
+func (Retirement) typ() string { return "retirement1" }
+
+// writeForHash writes the fields that contribute to the Retirement
+// entry ID (note: Ordinal is excluded).
+func (r *Retirement) writeForHash(w io.Writer) {
+       mustWriteForHash(w, r.Source)
+       mustWriteForHash(w, r.Data)
+       mustWriteForHash(w, r.ExtHash)
+}
+
+// NewRetirement creates a new Retirement.
+func NewRetirement(source *ValueSource, data *Hash, ordinal uint64) *Retirement {
+       return &Retirement{
+               Source:  source,
+               Data:    data,
+               Ordinal: ordinal,
+       }
+}
diff --git a/protocol/bc/spend.go b/protocol/bc/spend.go
new file mode 100644 (file)
index 0000000..b9c265c
--- /dev/null
@@ -0,0 +1,36 @@
+package bc
+
+import "io"
+
+// Spend accesses the value in a prior Output for transfer
+// elsewhere. It satisfies the Entry interface.
+//
+// (Not to be confused with the deprecated type SpendInput.)
+
+// typ returns the entry type string used when hashing a Spend.
+func (Spend) typ() string { return "spend1" }
+
+// writeForHash writes the fields that contribute to the Spend entry ID
+// (note: Ordinal and the witness fields are excluded).
+func (s *Spend) writeForHash(w io.Writer) {
+       mustWriteForHash(w, s.SpentOutputId)
+       mustWriteForHash(w, s.Data)
+       mustWriteForHash(w, s.ExtHash)
+}
+
+// SetDestination records where the spent value flows: the destination
+// entry's ID, the value moved, and its position there.
+func (s *Spend) SetDestination(id *Hash, val *AssetAmount, pos uint64) {
+       s.WitnessDestination = &ValueDestination{
+               Ref:      id,
+               Value:    val,
+               Position: pos,
+       }
+}
+
+// NewSpend creates a new Spend.
+func NewSpend(spentOutputID *Hash, data *Hash, ordinal uint64) *Spend {
+       return &Spend{
+               SpentOutputId: spentOutputID,
+               Data:          data,
+               Ordinal:       ordinal,
+       }
+}
+
+// SetAnchored records the ID of the entry anchored by this spend.
+func (s *Spend) SetAnchored(id *Hash) {
+       s.WitnessAnchoredId = id
+}
diff --git a/protocol/bc/time.go b/protocol/bc/time.go
new file mode 100644 (file)
index 0000000..c6290cd
--- /dev/null
@@ -0,0 +1,18 @@
+package bc
+
+import "time"
+
+// Millis converts a time.Time to a number of milliseconds since 1970.
+// NOTE(review): times before the Unix epoch produce a negative
+// UnixNano, which wraps to a huge uint64 here — confirm callers never
+// pass pre-epoch times.
+func Millis(t time.Time) uint64 {
+       return uint64(t.UnixNano()) / uint64(time.Millisecond)
+}
+
+// DurationMillis converts a time.Duration to a number of milliseconds.
+func DurationMillis(d time.Duration) uint64 {
+       return uint64(d / time.Millisecond)
+}
+
+// MillisDuration coverts milliseconds to a time.Duration.
+func MillisDuration(m uint64) time.Duration {
+       return time.Duration(m) * time.Millisecond
+}
diff --git a/protocol/bc/timerange.go b/protocol/bc/timerange.go
new file mode 100644 (file)
index 0000000..fc35bd5
--- /dev/null
@@ -0,0 +1,20 @@
+package bc
+
+import "io"
+
+// TimeRange denotes a time range. It satisfies the Entry interface.
+
+// typ returns the entry type string used when hashing a TimeRange.
+func (TimeRange) typ() string { return "timerange1" }
+
+// writeForHash writes the fields that contribute to the TimeRange
+// entry ID.
+func (tr *TimeRange) writeForHash(w io.Writer) {
+       mustWriteForHash(w, tr.MinTimeMs)
+       mustWriteForHash(w, tr.MaxTimeMs)
+       mustWriteForHash(w, tr.ExtHash)
+}
+
+// NewTimeRange creates a new TimeRange.
+func NewTimeRange(minTimeMS, maxTimeMS uint64) *TimeRange {
+       return &TimeRange{
+               MinTimeMs: minTimeMS,
+               MaxTimeMs: maxTimeMS,
+       }
+}
diff --git a/protocol/bc/translation.md b/protocol/bc/translation.md
new file mode 100644 (file)
index 0000000..99151b6
--- /dev/null
@@ -0,0 +1,206 @@
+## Translation Layer
+
+(This is a temporary guide for translating between old-style transaction data structures and new-style transaction data structures.)
+
+### OldTx -> NewTx
+
+This is a first intermediate step that allows keeping old SDK, old tx index and data structures within Core, but refactoring how txs and outputs are hashed for UTXO set and merkle root in block headers.
+
+1. Let `oldtx` be the transaction in old format.
+2. Let `newtx` be a new instance of `TxHeader` entry.
+3. Let `container` be the container for all entries.
+4. Set `newtx.version` to `oldtx.version`.
+5. If `oldtx.data` is non-empty:
+    1. Let `refdata` be a new `Data` entry.
+    2. Set `refdata.body` to `oldtx.data`.
+    3. Set `newtx.data` to `refdata.id`.
+    4. Add `refdata` to the `container`.
+6. Set `newtx.mintime` to `oldtx.mintime`.
+7. Set `newtx.maxtime` to `oldtx.maxtime`.
+8. Let `mux` be a new `Mux` entry.
+9. For each old input `oldinp`:
+    1. If the old input is issuance:
+        1. Let `is` be a new `Issuance` entry.
+        2. Set `is.value` to `AssetAmount { oldinp.assetid, oldinp.amount }`.
+        3. If `nonce` is empty:
+            1. Set `is.anchor` to the ID of the first new input. (If no input was mapped yet, go back to this step when such input is added.)
+        4. If `nonce` is non-empty:
+            1. Let `a` be a new `Nonce` entry.
+            2. Set `a.program` to (VM1, `PUSHDATA(nonce) DROP ASSET PUSHDATA(oldinp.assetid) EQUAL`). (The program pushes the nonce onto the stack then drops it, then calls the ASSET opcode, pushes the hardcoded old asset ID onto the stack, and checks that they are equal.)
+            3. Let `tr` be a new `TimeRange` entry.
+            4. Set `tr.mintime` to `oldtx.mintime`.
+            5. Set `tr.maxtime` to `oldtx.maxtime`.
+            6. Set `a.timerange` to `tr.id`.
+            7. Set `is.anchor` to `a.id`.
+            8. Add `a` to `container`.
+            9. Add `tr` to `container`.
+        5. Set `is.initial_block_id` to `oldinp.initial_block_id`.
+        6. Set `is.issuance_program` to `oldinp.issuance_program` (with its VM version).
+        7. If `oldinp.asset_definition` is non-empty:
+            1. Let `adef` be a new `Data` entry.
+            2. Set `adef.body` to `oldinp.asset_definition`.
+            3. Set `is.asset_definition` to `adef.id`.
+            4. Add `adef` to `container`.
+        8. If `oldinp.asset_definition` is empty:
+            1. Set `is.asset_definition` to a nil pointer `0x000000...`.
+        9. Create `ValueSource` struct `src`:
+            1. Set `src.ref` to `is.id`.
+            2. Set `src.position` to 0.
+            3. Set `src.value` to `is.value`.
+            4. Add `src` to `mux.sources`.
+        10. Add `is` to `container`.
+    2. If the old input is a spend:
+        1. Let `inp` be a new `Spend` entry.
+        2. Set `inp.spent_output` to `oldinp.output_id`.
+        3. Set `inp.data` to a nil pointer `0x00000...`.
+        4. Create `ValueSource` struct `src`:
+            1. Set `src.ref` to `inp.id`.
+            2. Set `src.position` to 0.
+            3. Set `src.value` to `AssetAmount{ oldinp.spent_output.(assetid,amount) } `.
+            4. Add `src` to `mux.sources`.
+        5. Add `inp` to `container`.
+10. For each output `oldout` at index `i`:
+    1. If the `oldout` contains a retirement program:
+        1. Let `destentry` be a new `Retirement` entry.
+    2. If the `oldout` is not a retirement:
+        1. Let `destentry` be a new `Output` entry.
+        2. Set `destentry.control_program` to `oldout.control_program` (with its VM version).
+    3. Create `ValueSource` struct `src`:
+        1. Set `src.ref` to `mux.id`.
+        2. Set `src.position` to `i`.
+        3. Set `src.value` to `AssetAmount { oldout.asset_id, oldout.amount }`.
+        4. Set `destentry.source` to `src`.
+    4. If `oldout.data` is non-empty:
+        1. Let `data` be a new `Data` entry.
+        2. Set `data.body` to `oldout.data`.
+        3. Set `destentry.data` to `data.id`.
+        4. Add `data` to `container`.
+    5. Add `destentry` to `container`.
+    6. Add `destentry` to `newtx.results`.
+
+
+### OldTxID -> NewTxID
+
+1. Map old tx to `newtx`.
+2. Return new tx's header ID as NewTxID.
+
+### OldWitTxID -> NewWitTxID
+
+1. Map old tx to new tx.
+2. Return new tx's header ID as NewWitTxID. This is the same as NewTxID.
+
+### OldOutputID -> NewOutputID
+
+When indexing old tx's outputs:
+
+1. Map old tx to new tx.
+2. Take corresponding new output.
+3. Compute its entry ID which will be NewOutputID.
+4. Use this new output ID to identify unspent outputs in the DB.
+
+### OldUnspentID -> NewUnspentID
+
+When inserting old tx's outputs into UTXO merkle set:
+
+1. Map old tx to new tx.
+2. Take corresponding new output.
+3. Compute its entry ID which will be NewUnspentID. (This is the same as NewOutputID.)
+4. Use this new unspent ID to insert into UTXO merkle set.
+
+
+### OldIssuanceHash -> NewIssuanceHash
+
+1. Map old tx to new tx.
+2. For each nonce entry in the new tx:
+    1. check its time range is within network-defined limits (not unbounded).
+    2. Use this entry ID as NewIssuanceHash
+    3. Insert new issuance hash in the current _issuance memory_ annotated with expiration date based on `nonce.timerange.maxtime`.
+
+### OldSigHash -> NewSigHash
+
+1. Map old tx to new tx.
+2. For each entry where a program is evaluated (Spend, Issuance or Nonce):
+    1. Compute `sighash = HASH(txid || entryid)`.
+
+
+
+### NewTx -> OldTx
+
+This is a second intermediate step that allows keeping old SDK, but refactoring how txs are represented and stored internally in Core.
+
+TODO: ...
+
+
+## Compression
+
+1. Serialization prefix indicates the format version that specifies how things are serialized and compressed.
+2. Replace hashes with varint offsets, reconstruct hashes in real time and then verify that top hash matches the source (e.g. merkle tree item)
+3. Replace some repeated elements such as initial block id with indices too.
+
+### VM mapping
+
+This shows how the implementation of each of the VM instructions need to be changed. Ones that say "no change" will work as already implemented on the OLD data structure. 
+
+* CHECKOUTPUT:   no change
+* ASSET:         no change
+* AMOUNT:        no change
+* PROGRAM:       no change
+* MINTIME:       no change
+* MAXTIME:       no change
+* INDEX:         no change
+* NONCE:         eliminated
+* TXREFDATAHASH: `newtx.refdatahash()`
+* REFDATAHASH:   `newcurrentinput.refdatahash()`
+* TXSIGHASH:     `hash(newcurrentinput.id() || newtx.id())`
+* OUTPUTID:      `newcurrentinput.spent_output.id()`
+
+
+New opcodes:
+
+* ENTRYID:       `currententry.id()`
+* NONCE:         `currentissuance.anchor.id()` (fails if the entry is not an issuance)
+
+
+### Block header format
+
+The new slightly different serialization format (i.e. the type prefix and extension hash format) should be applied to the block header as well. We are also removing the block witness from the block ID, as discussed above. Finally, we should flatten the confusing "block commitment" and simply make it three separate fields in the block header.
+
+#### BlockHeader entry
+
+    entry {
+        type="blockheader"
+        body:
+            version:                Integer
+            height:                 Integer
+            previous_block_id:      Pointer<BlockHeader>
+            timestamp:              Integer
+            transactions:           MerkleTree<Pointer<TxHeader>>
+            assets:                 PatriciaTree<Pointer<Output>>
+            next_consensus_program: String
+            ext_hash:               Hash
+        witness:
+            ext_hash:               Hash        
+    }
+
+The `MerkleTree` and `PatriciaTree` types are just 32-byte hashes representing the root of those respective trees.
+
+#### OldBlockHeader -> NewBlockHeader
+
+This generates a new BlockHeader data structure, for hashing purposes, from an old block.
+
+1. Let `oldblock` be the block in old format.
+2. Let `newblock` be a new instance of `BlockHeader` entry.
+3. Set `newblock.version` to `oldblock.version`.
+4. Set `newblock.height` to `oldblock.height`.
+5. Set `newblock.previous_block_id` to `oldblock.previous_block_id`.
+6. Set `newblock.timestamp` to `oldblock.timestamp`.
+7. Set `newblock.transactions` to `oldblock.block_commitment.transactions` (i.e. the root of the Merkle tree). Note that this Merkle tree should have been calculated using the new transaction ID.
+8. Set `newblock.assets` to `oldblock.block_commitment.assets` (i.e. the root of the Patricia tree). Note that this Patricia tree should have been calculated using the new Output IDs.
+9. Set `newblock.next_consensus_program` to `oldblock.block_commitment.next_consensus_program`.
+
+#### VM mapping
+
+PROGRAM:       same
+NEXTPROGRAM:   same
+BLOCKTIME:     same
+BLOCKSIGHASH:  newblock.id()
diff --git a/protocol/bc/tx.go b/protocol/bc/tx.go
new file mode 100644 (file)
index 0000000..27dbaa2
--- /dev/null
@@ -0,0 +1,95 @@
+package bc
+
+import (
+       "chain/crypto/sha3pool"
+       "chain/errors"
+)
+
+// Tx is a wrapper for the entries-based representation of a transaction.
+type Tx struct {
+       *TxHeader
+       ID       Hash
+       Entries  map[Hash]Entry
+       InputIDs []Hash // 1:1 correspondence with TxData.Inputs
+
+       // IDs of reachable entries of various kinds
+       NonceIDs       []Hash
+       SpentOutputIDs []Hash
+}
+
+// SigHash returns the signature hash for input n: the SHA3-256 digest
+// of the input's entry ID concatenated with the transaction ID.
+// n must be a valid index into tx.InputIDs.
+func (tx *Tx) SigHash(n uint32) (hash Hash) {
+       hasher := sha3pool.Get256()
+       defer sha3pool.Put256(hasher)
+
+       tx.InputIDs[n].WriteTo(hasher)
+       tx.ID.WriteTo(hasher)
+       hash.ReadFrom(hasher)
+       return hash
+}
+
+// Convenience routines for accessing entries of specific types by ID.
+// Each returns ErrMissingEntry if no entry has the given ID, and
+// ErrEntryType if the entry exists but has a different concrete type.
+
+var (
+       ErrEntryType    = errors.New("invalid entry type")
+       ErrMissingEntry = errors.New("missing entry")
+)
+
+// TimeRange returns the TimeRange entry with the given ID.
+func (tx *Tx) TimeRange(id Hash) (*TimeRange, error) {
+       e, ok := tx.Entries[id]
+       if !ok || e == nil {
+               return nil, errors.Wrapf(ErrMissingEntry, "id %x", id.Bytes())
+       }
+       tr, ok := e.(*TimeRange)
+       if !ok {
+               return nil, errors.Wrapf(ErrEntryType, "entry %x has unexpected type %T", id.Bytes(), e)
+       }
+       return tr, nil
+}
+
+// Output returns the Output entry with the given ID.
+func (tx *Tx) Output(id Hash) (*Output, error) {
+       e, ok := tx.Entries[id]
+       if !ok || e == nil {
+               return nil, errors.Wrapf(ErrMissingEntry, "id %x", id.Bytes())
+       }
+       o, ok := e.(*Output)
+       if !ok {
+               return nil, errors.Wrapf(ErrEntryType, "entry %x has unexpected type %T", id.Bytes(), e)
+       }
+       return o, nil
+}
+
+// Spend returns the Spend entry with the given ID.
+func (tx *Tx) Spend(id Hash) (*Spend, error) {
+       e, ok := tx.Entries[id]
+       if !ok || e == nil {
+               return nil, errors.Wrapf(ErrMissingEntry, "id %x", id.Bytes())
+       }
+       sp, ok := e.(*Spend)
+       if !ok {
+               return nil, errors.Wrapf(ErrEntryType, "entry %x has unexpected type %T", id.Bytes(), e)
+       }
+       return sp, nil
+}
+
+// Issuance returns the Issuance entry with the given ID.
+func (tx *Tx) Issuance(id Hash) (*Issuance, error) {
+       e, ok := tx.Entries[id]
+       if !ok || e == nil {
+               return nil, errors.Wrapf(ErrMissingEntry, "id %x", id.Bytes())
+       }
+       iss, ok := e.(*Issuance)
+       if !ok {
+               return nil, errors.Wrapf(ErrEntryType, "entry %x has unexpected type %T", id.Bytes(), e)
+       }
+       return iss, nil
+}
+
+// Nonce returns the Nonce entry with the given ID.
+func (tx *Tx) Nonce(id Hash) (*Nonce, error) {
+       e, ok := tx.Entries[id]
+       if !ok || e == nil {
+               return nil, errors.Wrapf(ErrMissingEntry, "id %x", id.Bytes())
+       }
+       nonce, ok := e.(*Nonce)
+       if !ok {
+               return nil, errors.Wrapf(ErrEntryType, "entry %x has unexpected type %T", id.Bytes(), e)
+       }
+       return nonce, nil
+}
diff --git a/protocol/bc/txheader.go b/protocol/bc/txheader.go
new file mode 100644 (file)
index 0000000..4c87978
--- /dev/null
@@ -0,0 +1,29 @@
+package bc
+
+import "io"
+
+// TxHeader contains header information for a transaction. Every
+// transaction on a blockchain contains exactly one TxHeader. The ID
+// of the TxHeader is the ID of the transaction. TxHeader satisfies
+// the Entry interface.
+
+// typ identifies the entry type for hashing purposes.
+func (TxHeader) typ() string { return "txheader" }
+// writeForHash serializes the fields that contribute to the header's
+// (and therefore the transaction's) ID.
+func (h *TxHeader) writeForHash(w io.Writer) {
+       mustWriteForHash(w, h.Version)
+       mustWriteForHash(w, h.ResultIds)
+       mustWriteForHash(w, h.Data)
+       mustWriteForHash(w, h.MinTimeMs)
+       mustWriteForHash(w, h.MaxTimeMs)
+       mustWriteForHash(w, h.ExtHash)
+}
+
+// NewTxHeader creates a new TxHeader.
+// minTimeMS and maxTimeMS bound the transaction's validity window in
+// milliseconds since 1970 (cf. Millis).
+func NewTxHeader(version uint64, resultIDs []*Hash, data *Hash, minTimeMS, maxTimeMS uint64) *TxHeader {
+       return &TxHeader{
+               Version:   version,
+               ResultIds: resultIDs,
+               Data:      data,
+               MinTimeMs: minTimeMS,
+               MaxTimeMs: maxTimeMS,
+       }
+}
diff --git a/protocol/block.go b/protocol/block.go
new file mode 100644 (file)
index 0000000..6b9a0fc
--- /dev/null
@@ -0,0 +1,271 @@
+package protocol
+
+import (
+       "context"
+       "fmt"
+       "time"
+
+       "chain/crypto/ed25519"
+       "chain/errors"
+       "chain/log"
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/state"
+       "chain/protocol/validation"
+       "chain/protocol/vm/vmutil"
+)
+
+// maxBlockTxs limits the number of transactions
+// included in each block.
+const maxBlockTxs = 10000
+
+// saveSnapshotFrequency stores how often to save a state
+// snapshot to the Store.
+const saveSnapshotFrequency = time.Hour
+
+var (
+       // ErrBadBlock is returned when a block is invalid.
+       ErrBadBlock = errors.New("invalid block")
+
+       // ErrStaleState is returned when the Chain does not have a current
+       // blockchain state.
+       ErrStaleState = errors.New("stale blockchain state")
+
+       // ErrBadStateRoot is returned when the computed assets merkle root
+       // disagrees with the one declared in a block header.
+       ErrBadStateRoot = errors.New("invalid state merkle root")
+)
+
+// GetBlock returns the block at the given height, if there is one,
+// otherwise it returns an error.
+// It delegates directly to the Chain's underlying Store.
+func (c *Chain) GetBlock(ctx context.Context, height uint64) (*legacy.Block, error) {
+       return c.store.GetBlock(ctx, height)
+}
+
+// GenerateBlock generates a valid, but unsigned, candidate block from
+// the current pending transaction pool. It returns the new block and
+// a snapshot of what the state snapshot is if the block is applied.
+//
+// After generating the block, the pending transaction pool will be
+// empty.
+//
+// Transactions that fail validation, fall outside the new block's
+// timestamp window, or conflict with earlier transactions (e.g.
+// double-spends) are silently dropped from the candidate block.
+func (c *Chain) GenerateBlock(ctx context.Context, prev *legacy.Block, snapshot *state.Snapshot, now time.Time, txs []*legacy.Tx) (*legacy.Block, *state.Snapshot, error) {
+       // TODO(kr): move this into a lower-level package (e.g. chain/protocol/bc)
+       // so that other packages (e.g. chain/protocol/validation) unit tests can
+       // call this function.
+
+       timestampMS := bc.Millis(now)
+       if timestampMS < prev.TimestampMS {
+               return nil, nil, fmt.Errorf("timestamp %d is earlier than prevblock timestamp %d", timestampMS, prev.TimestampMS)
+       }
+
+       // Make a copy of the snapshot that we can apply our changes to.
+       // BUG FIX: this previously copied c.state.snapshot, silently
+       // ignoring the snapshot argument; copy the caller-supplied
+       // snapshot instead so the function builds on the state the
+       // caller specified.
+       newSnapshot := state.Copy(snapshot)
+       newSnapshot.PruneNonces(timestampMS)
+
+       b := &legacy.Block{
+               BlockHeader: legacy.BlockHeader{
+                       Version:           1,
+                       Height:            prev.Height + 1,
+                       PreviousBlockHash: prev.Hash(),
+                       TimestampMS:       timestampMS,
+                       BlockCommitment: legacy.BlockCommitment{
+                               ConsensusProgram: prev.ConsensusProgram,
+                       },
+               },
+       }
+
+       var txEntries []*bc.Tx
+
+       for _, tx := range txs {
+               if len(b.Transactions) >= maxBlockTxs {
+                       break
+               }
+
+               // Filter out transactions that are not well-formed.
+               err := c.ValidateTx(tx.Tx)
+               if err != nil {
+                       // TODO(bobg): log this?
+                       continue
+               }
+
+               // Filter out transactions that are not yet valid, or no longer
+               // valid, per the block's timestamp.
+               if tx.Tx.MinTimeMs > 0 && tx.Tx.MinTimeMs > b.TimestampMS {
+                       // TODO(bobg): log this?
+                       continue
+               }
+               if tx.Tx.MaxTimeMs > 0 && tx.Tx.MaxTimeMs < b.TimestampMS {
+                       // TODO(bobg): log this?
+                       continue
+               }
+
+               // Filter out double-spends etc.
+               err = newSnapshot.ApplyTx(tx.Tx)
+               if err != nil {
+                       // TODO(bobg): log this?
+                       continue
+               }
+
+               b.Transactions = append(b.Transactions, tx)
+               txEntries = append(txEntries, tx.Tx)
+       }
+
+       var err error
+
+       b.TransactionsMerkleRoot, err = bc.MerkleRoot(txEntries)
+       if err != nil {
+               return nil, nil, errors.Wrap(err, "calculating tx merkle root")
+       }
+
+       b.AssetsMerkleRoot = newSnapshot.Tree.RootHash()
+
+       return b, newSnapshot, nil
+}
+
+// ValidateBlock validates an incoming block in advance of applying it
+// to a snapshot (with ApplyValidBlock) and committing it to the
+// blockchain (with CommitAppliedBlock).
+// The consensus-program signature check is skipped for the initial
+// block (height 1). Any failure is wrapped as ErrBadBlock.
+func (c *Chain) ValidateBlock(block, prev *legacy.Block) error {
+       blockEnts := legacy.MapBlock(block)
+       prevEnts := legacy.MapBlock(prev)
+       err := validation.ValidateBlock(blockEnts, prevEnts, c.InitialBlockHash, c.ValidateTx)
+       if err != nil {
+               return errors.Sub(ErrBadBlock, err)
+       }
+       if block.Height > 1 {
+               err = validation.ValidateBlockSig(blockEnts, prevEnts.NextConsensusProgram)
+       }
+       return errors.Sub(ErrBadBlock, err)
+}
+
+// ApplyValidBlock creates an updated snapshot without validating the
+// block. It does check that the block's declared assets merkle root
+// matches the root of the updated state tree, returning
+// ErrBadStateRoot on mismatch.
+func (c *Chain) ApplyValidBlock(block *legacy.Block) (*state.Snapshot, error) {
+       newSnapshot := state.Copy(c.state.snapshot)
+       err := newSnapshot.ApplyBlock(legacy.MapBlock(block))
+       if err != nil {
+               return nil, err
+       }
+       if block.AssetsMerkleRoot != newSnapshot.Tree.RootHash() {
+               return nil, ErrBadStateRoot
+       }
+       return newSnapshot, nil
+}
+
+// CommitBlock commits a block to the blockchain. The block
+// must already have been applied with ApplyValidBlock or
+// ApplyNewBlock, which will have produced the new snapshot that's
+// required here.
+//
+// This function saves the block to the store and sometimes (not more
+// often than saveSnapshotFrequency) saves the state tree to the
+// store. New-block callbacks (via asynchronous block-processor pins)
+// are triggered.
+//
+// TODO(bobg): rename to CommitAppliedBlock for clarity (deferred from https://github.com/chain/chain/pull/788)
+func (c *Chain) CommitAppliedBlock(ctx context.Context, block *legacy.Block, snapshot *state.Snapshot) error {
+       // SaveBlock is the linearization point. Once the block is committed
+       // to persistent storage, the block has been applied and everything
+       // else can be derived from that block.
+       err := c.store.SaveBlock(ctx, block)
+       if err != nil {
+               return errors.Wrap(err, "storing block")
+       }
+       // Persist a full state snapshot at most once per
+       // saveSnapshotFrequency interval.
+       if block.Time().After(c.lastQueuedSnapshot.Add(saveSnapshotFrequency)) {
+               c.queueSnapshot(ctx, block.Height, block.Time(), snapshot)
+       }
+
+       err = c.store.FinalizeBlock(ctx, block.Height)
+       if err != nil {
+               return errors.Wrap(err, "finalizing block")
+       }
+
+       // c.setState will update the local blockchain state and height.
+       // When c.store is a txdb.Store, and c has been initialized with a
+       // channel from txdb.ListenBlocks, then the above call to
+       // c.store.FinalizeBlock will have done a postgresql NOTIFY and
+       // that will wake up the goroutine in NewChain, which also calls
+       // setHeight.  But duplicate calls with the same blockheight are
+       // harmless; and the following call is required in the cases where
+       // it's not redundant.
+       c.setState(block, snapshot)
+       return nil
+}
+
+// queueSnapshot hands a snapshot to the background snapshot writer
+// without blocking; if the writer is still busy with a previous
+// snapshot, this one is skipped (with a log message).
+func (c *Chain) queueSnapshot(ctx context.Context, height uint64, timestamp time.Time, s *state.Snapshot) {
+       // Non-blockingly queue the snapshot for storage.
+       ps := pendingSnapshot{height: height, snapshot: s}
+       select {
+       case c.pendingSnapshots <- ps:
+               c.lastQueuedSnapshot = timestamp
+       default:
+               // Skip it; saving snapshots is taking longer than the snapshotting period.
+               log.Printf(ctx, "snapshot storage is taking too long; last queued at %s",
+                       c.lastQueuedSnapshot)
+       }
+}
+
+// setHeight records h as the current chain height and wakes any
+// height waiters. Calls with non-increasing heights are ignored.
+func (c *Chain) setHeight(h uint64) {
+       // We call setHeight from two places independently:
+       // CommitBlock and the Postgres LISTEN goroutine.
+       // This means we can get here twice for each block,
+       // and any of them might be arbitrarily delayed,
+       // which means h might be from the past.
+       // Detect and discard these duplicate calls.
+
+       c.state.cond.L.Lock()
+       defer c.state.cond.L.Unlock()
+
+       if h <= c.state.height {
+               return
+       }
+       c.state.height = h
+       c.state.cond.Broadcast()
+}
+
+// ValidateBlockForSig performs validation on an incoming _unsigned_
+// block in preparation for signing it. By definition it does not
+// execute the consensus program.
+// For the initial block (height 1) there is no previous block and
+// prev stays nil; legacy.MapBlock is presumably nil-tolerant here —
+// confirm before relying on that elsewhere.
+func (c *Chain) ValidateBlockForSig(ctx context.Context, block *legacy.Block) error {
+       var prev *legacy.Block
+
+       if block.Height > 1 {
+               var err error
+               prev, err = c.GetBlock(ctx, block.Height-1)
+               if err != nil {
+                       return errors.Wrap(err, "getting previous block")
+               }
+       }
+
+       err := validation.ValidateBlock(legacy.MapBlock(block), legacy.MapBlock(prev), c.InitialBlockHash, c.ValidateTx)
+       return errors.Sub(ErrBadBlock, err)
+}
+
+// NewInitialBlock creates the unsigned height-1 block for a new
+// blockchain, whose consensus program requires nSigs signatures from
+// the given pubkeys.
+func NewInitialBlock(pubkeys []ed25519.PublicKey, nSigs int, timestamp time.Time) (*legacy.Block, error) {
+       // TODO(kr): move this into a lower-level package (e.g. chain/protocol/bc)
+       // so that other packages (e.g. chain/protocol/validation) unit tests can
+       // call this function.
+
+       script, err := vmutil.BlockMultiSigProgram(pubkeys, nSigs)
+       if err != nil {
+               return nil, err
+       }
+
+       root, err := bc.MerkleRoot(nil) // calculate the zero value of the tx merkle root
+       if err != nil {
+               return nil, errors.Wrap(err, "calculating zero value of tx merkle root")
+       }
+
+       b := &legacy.Block{
+               BlockHeader: legacy.BlockHeader{
+                       Version:     1,
+                       Height:      1,
+                       TimestampMS: bc.Millis(timestamp),
+                       BlockCommitment: legacy.BlockCommitment{
+                               TransactionsMerkleRoot: root,
+                               ConsensusProgram:       script,
+                       },
+               },
+       }
+       return b, nil
+}
diff --git a/protocol/block_test.go b/protocol/block_test.go
new file mode 100644 (file)
index 0000000..05493cf
--- /dev/null
@@ -0,0 +1,277 @@
+package protocol
+
+import (
+       "context"
+       "encoding/hex"
+       "testing"
+       "time"
+
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/prottest/memstore"
+       "chain/protocol/state"
+       "chain/testutil"
+)
+
+// TestGetBlock checks GetBlock against an empty store (expect error)
+// and a store holding one block (expect that block).
+func TestGetBlock(t *testing.T) {
+       ctx := context.Background()
+
+       b1 := &legacy.Block{BlockHeader: legacy.BlockHeader{Height: 1}}
+       noBlocks := memstore.New()
+       oneBlock := memstore.New()
+       oneBlock.SaveBlock(ctx, b1)
+       oneBlock.SaveSnapshot(ctx, 1, state.Empty())
+
+       cases := []struct {
+               store   Store
+               want    *legacy.Block
+               wantErr bool
+       }{
+               {noBlocks, nil, true},
+               {oneBlock, b1, false},
+       }
+
+       for _, test := range cases {
+               c, err := NewChain(ctx, b1.Hash(), test.store, nil)
+               if err != nil {
+                       testutil.FatalErr(t, err)
+               }
+               got, gotErr := c.GetBlock(ctx, c.Height())
+               if !testutil.DeepEqual(got, test.want) {
+                       t.Errorf("got latest = %+v want %+v", got, test.want)
+               }
+               if (gotErr != nil) != test.wantErr {
+                       t.Errorf("got latest err = %q want err?: %t", gotErr, test.wantErr)
+               }
+       }
+}
+
+// TestNoTimeTravel verifies setHeight never moves the height backward.
+func TestNoTimeTravel(t *testing.T) {
+       ctx := context.Background()
+       c, err := NewChain(ctx, bc.Hash{}, memstore.New(), nil)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       c.setHeight(1)
+       c.setHeight(2)
+
+       c.setHeight(1) // don't go backward
+       if c.state.height != 2 {
+               t.Fatalf("c.state.height = %d want 2", c.state.height)
+       }
+}
+
+func TestWaitForBlockSoonAlreadyExists(t *testing.T) {
+       c, _ := newTestChain(t, time.Now())
+       makeEmptyBlock(t, c) // height=2
+       makeEmptyBlock(t, c) // height=3
+
+       err := <-c.BlockSoonWaiter(context.Background(), 2)
+       if err != nil {
+               t.Fatal(err)
+       }
+}
+
+func TestWaitForBlockSoonDistantFuture(t *testing.T) {
+       c, _ := newTestChain(t, time.Now())
+
+       got := <-c.BlockSoonWaiter(context.Background(), 100) // distant future
+       want := ErrTheDistantFuture
+       if got != want {
+               t.Errorf("BlockSoonWaiter(100) = %+v want %+v", got, want)
+       }
+}
+
+func TestWaitForBlockSoonWaits(t *testing.T) {
+       // This test is inherently racy. It's possible
+       // that the block creation might run before
+       // the wait's internal test loop finds no block.
+       // In that case, the test will pass, but it will
+       // not have tested anything.
+       //
+       // It's the best we can do.
+
+       c, _ := newTestChain(t, time.Now())
+       makeEmptyBlock(t, c) // height=2
+
+       go func() {
+               time.Sleep(10 * time.Millisecond) // sorry for the slow test
+               makeEmptyBlock(t, c)              // height=3
+       }()
+
+       err := <-c.BlockSoonWaiter(context.Background(), 3)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if g := c.Height(); g != 3 {
+               t.Errorf("height after waiting = %d want 3", g)
+       }
+}
+
+func TestWaitForBlockSoonTimesout(t *testing.T) {
+       c, _ := newTestChain(t, time.Now())
+       go func() {
+               makeEmptyBlock(t, c) // height=2
+       }()
+
+       ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+       defer cancel()
+
+       err := <-c.BlockSoonWaiter(ctx, 3)
+       if err != ctx.Err() {
+               t.Fatalf("expected timeout err, got %v", err)
+       }
+}
+
+// TestGenerateBlock generates a block from two issuance transactions
+// and compares it against hardcoded expected merkle roots.
+func TestGenerateBlock(t *testing.T) {
+       ctx := context.Background()
+       now := time.Unix(233400000, 0)
+       c, b1 := newTestChain(t, now)
+
+       initialBlockHash := b1.Hash()
+       assetID := bc.ComputeAssetID(nil, &initialBlockHash, 1, &bc.EmptyStringHash)
+
+       txs := []*legacy.Tx{
+               legacy.NewTx(legacy.TxData{
+                       Version: 1,
+                       MinTime: 233400000000,
+                       MaxTime: 233400000001,
+                       Inputs: []*legacy.TxInput{
+                               legacy.NewIssuanceInput([]byte{1}, 50, nil, initialBlockHash, nil, [][]byte{
+                                       nil,
+                                       mustDecodeHex("30450221009037e1d39b7d59d24eba8012baddd5f4ab886a51b46f52b7c479ddfa55eeb5c5022076008409243475b25dfba6db85e15cf3d74561a147375941e4830baa69769b5101"),
+                                       mustDecodeHex("51210210b002870438af79b829bc22c4505e14779ef0080c411ad497d7a0846ee0af6f51ae")}, nil),
+                       },
+                       Outputs: []*legacy.TxOutput{
+                               legacy.NewTxOutput(assetID, 50, mustDecodeHex("a9145881cd104f8d64635751ac0f3c0decf9150c110687"), nil),
+                       },
+               }),
+               legacy.NewTx(legacy.TxData{
+                       Version: 1,
+                       MinTime: 233400000000,
+                       MaxTime: 233400000001,
+                       Inputs: []*legacy.TxInput{
+                               legacy.NewIssuanceInput([]byte{2}, 50, nil, initialBlockHash, nil, [][]byte{
+                                       nil,
+                                       mustDecodeHex("3045022100f3bcffcfd6a1ce9542b653500386cd0ee7b9c86c59390ca0fc0238c0ebe3f1d6022065ac468a51a016842660c3a616c99a9aa5109a3bad1877ba3e0f010f3972472e01"),
+                                       mustDecodeHex("51210210b002870438af79b829bc22c4505e14779ef0080c411ad497d7a0846ee0af6f51ae"),
+                               }, nil),
+                       },
+                       Outputs: []*legacy.TxOutput{
+                               legacy.NewTxOutput(assetID, 50, mustDecodeHex("a914c171e443e05b953baa7b7d834028ed91e47b4d0b87"), nil),
+                       },
+               }),
+       }
+
+       got, _, err := c.GenerateBlock(ctx, b1, state.Empty(), now, txs)
+       if err != nil {
+               t.Fatalf("err got = %v want nil", err)
+       }
+
+       // TODO(bobg): verify these hashes are correct
+       wantTxRoot := mustDecodeHash("ab5f5f111beb1e6b49da8334360589c7da3aac1cdd61067ea9a55bec47cb745c")
+       wantAssetsRoot := mustDecodeHash("a31a9b5f71a6d6fa0c87361db4a98c9a82f603f9d9ff584f6613b9d56ccf5ebd")
+
+       want := &legacy.Block{
+               BlockHeader: legacy.BlockHeader{
+                       Version:           1,
+                       Height:            2,
+                       PreviousBlockHash: b1.Hash(),
+                       TimestampMS:       bc.Millis(now),
+                       BlockCommitment: legacy.BlockCommitment{
+                               TransactionsMerkleRoot: wantTxRoot,
+                               AssetsMerkleRoot:       wantAssetsRoot,
+                               ConsensusProgram:       b1.ConsensusProgram,
+                       },
+               },
+               Transactions: txs,
+       }
+
+       if !testutil.DeepEqual(got, want) {
+               t.Errorf("generated block:\ngot:  %+v\nwant: %+v", got, want)
+       }
+}
+
+func TestValidateBlockForSig(t *testing.T) {
+       initialBlock, err := NewInitialBlock(testutil.TestPubs, 1, time.Now())
+       if err != nil {
+               t.Fatal("unexpected error ", err)
+       }
+
+       ctx := context.Background()
+       c, err := NewChain(ctx, initialBlock.Hash(), memstore.New(), nil)
+       if err != nil {
+               t.Fatal("unexpected error ", err)
+       }
+
+       err = c.ValidateBlockForSig(ctx, initialBlock)
+       if err != nil {
+               t.Error("unexpected error ", err)
+       }
+}
+
+// newTestChain returns a new Chain using memstore for storage,
+// along with an initial block b1 (with a 0/0 multisig program).
+// It commits b1 before returning.
+func newTestChain(tb testing.TB, ts time.Time) (c *Chain, b1 *legacy.Block) {
+       ctx := context.Background()
+
+       var err error
+
+       b1, err = NewInitialBlock(nil, 0, ts)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       c, err = NewChain(ctx, b1.Hash(), memstore.New(), nil)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       // TODO(tessr): consider adding MaxIssuanceWindow to NewChain
+       c.MaxIssuanceWindow = 48 * time.Hour
+       err = c.CommitAppliedBlock(ctx, b1, state.Empty())
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       return c, b1
+}
+
+// makeEmptyBlock generates and commits a block containing no
+// transactions on top of c's current block, failing tb on any error.
+func makeEmptyBlock(tb testing.TB, c *Chain) {
+       ctx := context.Background()
+
+       curBlock, err := c.GetBlock(ctx, c.Height())
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+
+       if len(curBlock.Transactions) > 0 {
+               tb.Fatal("cannot make nonempty block")
+       }
+
+       curState := state.Empty()
+
+       nextBlock, nextState, err := c.GenerateBlock(ctx, curBlock, curState, time.Now(), nil)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       err = c.CommitAppliedBlock(ctx, nextBlock, nextState)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+}
+
+// mustDecodeHex decodes a hex string, panicking on failure (test-only helper).
+func mustDecodeHex(s string) []byte {
+       data, err := hex.DecodeString(s)
+       if err != nil {
+               panic(err)
+       }
+       return data
+}
+
+// mustDecodeHash parses a hex-encoded bc.Hash, panicking on failure
+// (test-only helper).
+func mustDecodeHash(s string) (h bc.Hash) {
+       err := h.UnmarshalText([]byte(s))
+       if err != nil {
+               panic(err)
+       }
+       return h
+}
diff --git a/protocol/patricia/patricia.go b/protocol/patricia/patricia.go
new file mode 100644 (file)
index 0000000..b99297b
--- /dev/null
@@ -0,0 +1,289 @@
+// Package patricia computes the Merkle Patricia Tree Hash of a
+// set of bit strings, as described in the Chain Protocol spec.
+// See https://chain.com/docs/protocol/specifications/data#merkle-patricia-tree.
+// Because a patricia tree (a radix tree with a radix of 2)
+// provides efficient incremental updates, the Merkle Patricia
+// Tree Hash computation does too, making this structure suitable
+// for the blockchain full-state commitment.
+//
+// Type Tree represents a set, where the elements are bit strings.
+// The set must be prefix-free -- no item can be a prefix of
+// any other -- enforced by Insert.
+// The length of each bit string must also be a multiple of eight,
+// because the interface uses []byte to represent an item.
+//
+// The nodes in the tree form an immutable persistent data
+// structure. It is okay to copy a Tree struct,
+// which contains the root of the tree, to obtain a new tree
+// with the same contents. The time to make such a copy is
+// independent of the size of the tree.
+package patricia
+
+import (
+       "bytes"
+
+       "chain/crypto/sha3pool"
+       "chain/errors"
+       "chain/protocol/bc"
+)
+
+// leafPrefix and interiorPrefix are domain-separation bytes mixed
+// into leaf and interior node hashes respectively, so a leaf's value
+// can never reproduce an interior node's hash.
+var (
+       leafPrefix     = []byte{0x00}
+       interiorPrefix = []byte{0x01}
+)
+
+// Tree implements a patricia tree.
+// The zero value is an empty tree ready for use. Copying a Tree
+// copies only the root pointer; nodes themselves are shared and
+// treated as immutable (see the package comment).
+type Tree struct {
+       root *node
+}
+
+// WalkFunc is the type of the function called for each item
+// visited by Walk. If an error is returned, processing stops.
+type WalkFunc func(item []byte) error
+
+// Walk walks t calling walkFn for each item.
+// If an error is returned by walkFn at any point,
+// processing is stopped and the error is returned.
+func Walk(t *Tree, walkFn WalkFunc) error {
+       if t.root == nil {
+               return nil
+       }
+       return walk(t.root, walkFn)
+}
+
+func walk(n *node, walkFn WalkFunc) error {
+       if n.isLeaf {
+               return walkFn(n.Key())
+       }
+
+       err := walk(n.children[0], walkFn)
+       if err != nil {
+               return err
+       }
+
+       err = walk(n.children[1], walkFn)
+       return err
+}
+
+// Contains returns whether t contains item.
+func (t *Tree) Contains(item []byte) bool {
+       if t.root == nil {
+               return false
+       }
+
+       key := bitKey(item)
+       n := lookup(t.root, key)
+
+       var hash bc.Hash
+       h := sha3pool.Get256()
+       h.Write(leafPrefix)
+       h.Write(item)
+       hash.ReadFrom(h)
+       sha3pool.Put256(h)
+       return n != nil && n.Hash() == hash
+}
+
+func lookup(n *node, key []uint8) *node {
+       if bytes.Equal(n.key, key) {
+               if !n.isLeaf {
+                       return nil
+               }
+               return n
+       }
+       if !bytes.HasPrefix(key, n.key) {
+               return nil
+       }
+
+       bit := key[len(n.key)]
+       return lookup(n.children[bit], key)
+}
+
+// Insert inserts item into t.
+//
+// It is an error for item to be a prefix of an element
+// in t or to contain an element in t as a prefix.
+// If item itself is already in t, Insert does nothing
+// (and this is not an error).
+func (t *Tree) Insert(item []byte) error {
+       key := bitKey(item)
+
+       var hash bc.Hash
+       h := sha3pool.Get256()
+       h.Write(leafPrefix)
+       h.Write(item)
+       hash.ReadFrom(h)
+       sha3pool.Put256(h)
+
+       if t.root == nil {
+               t.root = &node{key: key, hash: &hash, isLeaf: true}
+               return nil
+       }
+
+       var err error
+       t.root, err = insert(t.root, key, &hash)
+       return err
+}
+
+// insert returns the root of a new tree equal to the tree rooted at
+// n but with hash stored under key. Shared nodes are never mutated:
+// the path from the root to the affected position is copied, and all
+// other subtrees are reused (persistent data structure). On error it
+// returns n unchanged.
+func insert(n *node, key []uint8, hash *bc.Hash) (*node, error) {
+       if bytes.Equal(n.key, key) {
+               if !n.isLeaf {
+                       // key equals an interior node's prefix, so it is a
+                       // proper prefix of existing elements.
+                       return n, errors.Wrap(errors.New("key provided is a prefix to other keys"))
+               }
+
+               // The same leaf already exists; rebuild it with the new hash.
+               n = &node{
+                       isLeaf: true,
+                       key:    n.key,
+                       hash:   hash,
+               }
+               return n, nil
+       }
+
+       if bytes.HasPrefix(key, n.key) {
+               if n.isLeaf {
+                       // An existing element (n) is a prefix of key.
+                       return n, errors.Wrap(errors.New("key provided is a prefix to other keys"))
+               }
+               bit := key[len(n.key)]
+
+               child := n.children[bit]
+               child, err := insert(child, key, hash)
+               if err != nil {
+                       return n, err
+               }
+               newNode := new(node)
+               *newNode = *n
+               newNode.children[bit] = child // mutation is ok because newNode hasn't escaped yet
+               newNode.hash = nil            // invalidate the memoized hash
+               return newNode, nil
+       }
+
+       // key diverges from n.key: fork above n at their common prefix,
+       // with the new leaf on one side and the existing subtree on the
+       // other.
+       common := commonPrefixLen(n.key, key)
+       newNode := &node{
+               key: key[:common],
+       }
+       newNode.children[key[common]] = &node{
+               key:    key,
+               hash:   hash,
+               isLeaf: true,
+       }
+       newNode.children[1-key[common]] = n
+       return newNode, nil
+}
+
+// Delete removes item from t, if present.
+func (t *Tree) Delete(item []byte) {
+       key := bitKey(item)
+
+       if t.root != nil {
+               t.root = delete(t.root, key)
+       }
+}
+
+// delete returns the root of a new tree equal to the tree rooted at
+// n but with the leaf at key removed, or nil if the resulting tree is
+// empty. Like insert, it copies the affected path and shares every
+// other subtree.
+func delete(n *node, key []uint8) *node {
+       if bytes.Equal(key, n.key) {
+               if !n.isLeaf {
+                       // key names an interior prefix, not a stored item;
+                       // nothing to remove.
+                       return n
+               }
+               // Remove this leaf.
+               return nil
+       }
+
+       if !bytes.HasPrefix(key, n.key) {
+               // key is not under this subtree.
+               return n
+       }
+
+       bit := key[len(n.key)]
+       newChild := delete(n.children[bit], key)
+
+       if newChild == nil {
+               // The deleted leaf's sibling replaces this fork node.
+               return n.children[1-bit]
+       }
+
+       newNode := new(node)
+       *newNode = *n
+       newNode.key = newChild.key[:len(n.key)] // only use slices of leaf node keys
+       newNode.children[bit] = newChild
+       newNode.hash = nil
+
+       return newNode
+}
+
+// RootHash returns the Merkle root of the tree.
+func (t *Tree) RootHash() bc.Hash {
+       root := t.root
+       if root == nil {
+               return bc.Hash{}
+       }
+       return root.Hash()
+}
+
// bitKey expands byteKey into one element per bit, most significant
// bit first, yielding the key form used by insert and delete.
func bitKey(byteKey []byte) []uint8 {
	key := make([]uint8, len(byteKey)*8)
	for i, b := range byteKey {
		for j := 0; j < 8; j++ {
			key[i*8+j] = (b >> (7 - j)) & 1
		}
	}
	return key
}
+
// byteKey is the inverse of bitKey: it packs eight bits per byte,
// most significant bit first.
func byteKey(bitKey []uint8) []byte {
	key := make([]byte, len(bitKey)/8)
	for i := range key {
		var b byte
		for j := 0; j < 8; j++ {
			b = b<<1 | bitKey[i*8+j]
		}
		key[i] = b
	}
	return key
}
+
// commonPrefixLen returns the number of leading elements shared by
// a and b.
func commonPrefixLen(a, b []uint8) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return i
		}
	}
	return n
}
+
+// node is a leaf or branch node in a tree
+type node struct {
+       key      []uint8  // bit-level key; for leaves, the full bitKey of the item
+       hash     *bc.Hash // memoized hash; nil marks a dirty interior node
+       isLeaf   bool
+       children [2]*node // both nil for leaves, both set for interior nodes
+}
+
+// Key returns the key for the current node as bytes, as it
+// was provided to Insert.
+func (n *node) Key() []byte { return byteKey(n.key) }
+
+// Hash will return the hash for this node.
+// The hash is computed on first use and memoized via calcHash.
+func (n *node) Hash() bc.Hash {
+       n.calcHash()
+       return *n.hash
+}
+
+// calcHash ensures n.hash is populated, recursively computing and
+// memoizing the hashes of any dirty (hash == nil) descendants. An
+// interior hash covers the interiorPrefix byte followed by both
+// child hashes; leaf hashes are always set at construction time.
+func (n *node) calcHash() {
+       if n.hash != nil {
+               // Already memoized (always true for leaves).
+               return
+       }
+
+       h := sha3pool.Get256()
+       h.Write(interiorPrefix)
+       for _, c := range n.children {
+               c.calcHash()
+               c.hash.WriteTo(h)
+       }
+
+       var hash bc.Hash
+       hash.ReadFrom(h)
+       n.hash = &hash
+       sha3pool.Put256(h)
+}
diff --git a/protocol/patricia/patricia_test.go b/protocol/patricia/patricia_test.go
new file mode 100644 (file)
index 0000000..fc15b87
--- /dev/null
@@ -0,0 +1,599 @@
+package patricia
+
+import (
+       "fmt"
+       "log"
+       "math/rand"
+       "strconv"
+       "strings"
+       "testing"
+       "testing/quick"
+
+       "golang.org/x/crypto/sha3"
+
+       "chain/protocol/bc"
+       "chain/testutil"
+)
+
+func BenchmarkInserts(b *testing.B) {
+       const nodes = 10000
+       for i := 0; i < b.N; i++ {
+               r := rand.New(rand.NewSource(12345))
+               tr := new(Tree)
+               for j := 0; j < nodes; j++ {
+                       var h [32]byte
+                       _, err := r.Read(h[:])
+                       if err != nil {
+                               b.Fatal(err)
+                       }
+
+                       err = tr.Insert(h[:])
+                       if err != nil {
+                               b.Fatal(err)
+                       }
+               }
+       }
+}
+
+func BenchmarkInsertsRootHash(b *testing.B) {
+       const nodes = 10000
+       for i := 0; i < b.N; i++ {
+               r := rand.New(rand.NewSource(12345))
+               tr := new(Tree)
+               for j := 0; j < nodes; j++ {
+                       var h [32]byte
+                       _, err := r.Read(h[:])
+                       if err != nil {
+                               b.Fatal(err)
+                       }
+
+                       err = tr.Insert(h[:])
+                       if err != nil {
+                               b.Fatal(err)
+                       }
+               }
+               tr.RootHash()
+       }
+}
+
+func TestRootHashBug(t *testing.T) {
+       tr := new(Tree)
+
+       err := tr.Insert([]byte{0x94})
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = tr.Insert([]byte{0x36})
+       if err != nil {
+               t.Fatal(err)
+       }
+       before := tr.RootHash()
+       err = tr.Insert([]byte{0xba})
+       if err != nil {
+               t.Fatal(err)
+       }
+       if tr.RootHash() == before {
+               t.Errorf("before and after root hash is %s", before.String())
+       }
+}
+
+func TestLeafVsInternalNodes(t *testing.T) {
+       tr0 := new(Tree)
+
+       err := tr0.Insert([]byte{0x01})
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = tr0.Insert([]byte{0x02})
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = tr0.Insert([]byte{0x03})
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = tr0.Insert([]byte{0x04})
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Force calculation of all the hashes.
+       tr0.RootHash()
+       t.Logf("first child = %s, %t", tr0.root.children[0].hash, tr0.root.children[0].isLeaf)
+       t.Logf("second child = %s, %t", tr0.root.children[1].hash, tr0.root.children[1].isLeaf)
+
+       // Create a second tree using an internal node from tr1.
+       tr1 := new(Tree)
+       err = tr1.Insert(tr0.root.children[0].hash.Bytes()) // internal node of tr0
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = tr1.Insert(tr0.root.children[1].hash.Bytes()) // sibling leaf node of above node ^
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if tr1.RootHash() == tr0.RootHash() {
+               t.Errorf("tr0 and tr1 have matching root hashes: %x", tr1.RootHash().Bytes())
+       }
+}
+
+func TestRootHashInsertQuickCheck(t *testing.T) {
+       tr := new(Tree)
+
+       f := func(b [32]byte) bool {
+               before := tr.RootHash()
+               err := tr.Insert(b[:])
+               if err != nil {
+                       return false
+               }
+               return before != tr.RootHash()
+       }
+       if err := quick.Check(f, nil); err != nil {
+               t.Error(err)
+       }
+}
+
+// TestLookup exercises lookup directly against hand-built trees:
+// a single-leaf tree, a miss on a single-leaf tree, a one-level
+// fork, and a two-level fork.
+func TestLookup(t *testing.T) {
+       tr := &Tree{
+               root: &node{key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+       }
+       got := lookup(tr.root, bitKey(bits("11111111")))
+       if !testutil.DeepEqual(got, tr.root) {
+               t.Log("lookup on 1-node tree")
+               t.Fatalf("got:\n%swant:\n%s", prettyNode(got, 0), prettyNode(tr.root, 0))
+       }
+
+       tr = &Tree{
+               root: &node{key: bools("11111110"), hash: hashPtr(hashForLeaf(bits("11111110"))), isLeaf: true},
+       }
+       got = lookup(tr.root, bitKey(bits("11111111")))
+       if got != nil {
+               t.Log("lookup nonexistent key on 1-node tree")
+               t.Fatalf("got:\n%swant nil", prettyNode(got, 0))
+       }
+
+       tr = &Tree{
+               root: &node{
+                       key:  bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11110000")), hashForLeaf(bits("11111111")))),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                       },
+               },
+       }
+       got = lookup(tr.root, bitKey(bits("11110000")))
+       if !testutil.DeepEqual(got, tr.root.children[0]) {
+               t.Log("lookup root's first child")
+               t.Fatalf("got:\n%swant:\n%s", prettyNode(got, 0), prettyNode(tr.root.children[0], 0))
+       }
+
+       tr = &Tree{
+               root: &node{
+                       key: bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(
+                               hashForLeaf(bits("11110000")),
+                               hashForNonLeaf(hashForLeaf(bits("11111100")), hashForLeaf(bits("11111111"))),
+                       )),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {
+                                       key:  bools("111111"),
+                                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111100")), hashForLeaf(bits("11111111")))),
+                                       children: [2]*node{
+                                               {key: bools("11111100"), hash: hashPtr(hashForLeaf(bits("11111100"))), isLeaf: true},
+                                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                                       },
+                               },
+                       },
+               },
+       }
+       got = lookup(tr.root, bitKey(bits("11111100")))
+       if !testutil.DeepEqual(got, tr.root.children[1].children[0]) {
+               t.Fatalf("got:\n%swant:\n%s", prettyNode(got, 0), prettyNode(tr.root.children[1].children[0], 0))
+       }
+}
+
+func TestContains(t *testing.T) {
+       tr := new(Tree)
+       tr.Insert(bits("00000011"))
+       tr.Insert(bits("00000010"))
+
+       if v := bits("00000011"); !tr.Contains(v) {
+               t.Errorf("expected tree to contain %x, but did not", v)
+       }
+       if v := bits("00000000"); tr.Contains(v) {
+               t.Errorf("expected tree to not contain %x, but did", v)
+       }
+       if v := bits("00000010"); !tr.Contains(v) {
+               t.Errorf("expected tree to contain %x, but did not", v)
+       }
+}
+
+// TestInsert builds a tree one item at a time and compares the
+// resulting structure (keys, memoized hashes, leaf flags) against
+// hand-constructed expected trees at every step: first insert,
+// duplicate insert, fork creation, deeper forks, and splitting a
+// compressed branch node.
+func TestInsert(t *testing.T) {
+       tr := new(Tree)
+
+       tr.Insert(bits("11111111"))
+       tr.RootHash()
+       want := &Tree{
+               root: &node{key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               log.Printf("want hash? %x", hashForLeaf(bits("11111111")).Bytes())
+               t.Log("insert into empty tree")
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Insert(bits("11111111"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Log("inserting the same key does not modify the tree")
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Insert(bits("11110000"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{
+                       key:  bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11110000")), hashForLeaf(bits("11111111")))),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                       },
+               },
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Log("different key creates a fork")
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Insert(bits("11111100"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{
+                       key: bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(
+                               hashForLeaf(bits("11110000")),
+                               hashForNonLeaf(hashForLeaf(bits("11111100")), hashForLeaf(bits("11111111"))),
+                       )),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {
+                                       key:  bools("111111"),
+                                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111100")), hashForLeaf(bits("11111111")))),
+                                       children: [2]*node{
+                                               {key: bools("11111100"), hash: hashPtr(hashForLeaf(bits("11111100"))), isLeaf: true},
+                                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                                       },
+                               },
+                       },
+               },
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Insert(bits("11111110"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{
+                       key: bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(
+                               hashForLeaf(bits("11110000")),
+                               hashForNonLeaf(
+                                       hashForLeaf(bits("11111100")),
+                                       hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))),
+                               ),
+                       )),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {
+                                       key: bools("111111"),
+                                       hash: hashPtr(hashForNonLeaf(
+                                               hashForLeaf(bits("11111100")),
+                                               hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))))),
+                                       children: [2]*node{
+                                               {key: bools("11111100"), hash: hashPtr(hashForLeaf(bits("11111100"))), isLeaf: true},
+                                               {
+                                                       key:  bools("1111111"),
+                                                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111")))),
+                                                       children: [2]*node{
+                                                               {key: bools("11111110"), hash: hashPtr(hashForLeaf(bits("11111110"))), isLeaf: true},
+                                                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+               },
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Log("a fork is created for each level of similar key")
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Insert(bits("11111011"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{
+                       key: bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(
+                               hashForLeaf(bits("11110000")),
+                               hashForNonLeaf(
+                                       hashForLeaf(bits("11111011")),
+                                       hashForNonLeaf(
+                                               hashForLeaf(bits("11111100")),
+                                               hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))),
+                                       ),
+                               ),
+                       )),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {
+                                       key: bools("11111"),
+                                       hash: hashPtr(hashForNonLeaf(
+                                               hashForLeaf(bits("11111011")),
+                                               hashForNonLeaf(
+                                                       hashForLeaf(bits("11111100")),
+                                                       hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))),
+                                               ))),
+                                       children: [2]*node{
+                                               {key: bools("11111011"), hash: hashPtr(hashForLeaf(bits("11111011"))), isLeaf: true},
+                                               {
+                                                       key: bools("111111"),
+                                                       hash: hashPtr(hashForNonLeaf(
+                                                               hashForLeaf(bits("11111100")),
+                                                               hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))),
+                                                       )),
+                                                       children: [2]*node{
+                                                               {key: bools("11111100"), hash: hashPtr(hashForLeaf(bits("11111100"))), isLeaf: true},
+                                                               {
+                                                                       key:  bools("1111111"),
+                                                                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111")))),
+                                                                       children: [2]*node{
+                                                                               {key: bools("11111110"), hash: hashPtr(hashForLeaf(bits("11111110"))), isLeaf: true},
+                                                                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                                                                       },
+                                                               },
+                                                       },
+                                               },
+                                       },
+                               },
+                       },
+               },
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Log("compressed branch node is split")
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+}
+
+// TestDelete starts from a hand-built four-leaf tree and deletes
+// items one at a time (including a nonexistent one), comparing the
+// full structure against expected trees after every step, down to
+// the empty tree.
+func TestDelete(t *testing.T) {
+       tr := new(Tree)
+       tr.root = &node{
+               key: bools("1111"),
+               hash: hashPtr(hashForNonLeaf(
+                       hashForLeaf(bits("11110000")),
+                       hashForNonLeaf(
+                               hashForLeaf(bits("11111100")),
+                               hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))),
+                       ),
+               )),
+               children: [2]*node{
+                       {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                       {
+                               key: bools("111111"),
+                               hash: hashPtr(hashForNonLeaf(
+                                       hashForLeaf(bits("11111100")),
+                                       hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111"))),
+                               )),
+                               children: [2]*node{
+                                       {key: bools("11111100"), hash: hashPtr(hashForLeaf(bits("11111100"))), isLeaf: true},
+                                       {
+                                               key:  bools("1111111"),
+                                               hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111")))),
+                                               children: [2]*node{
+                                                       {key: bools("11111110"), hash: hashPtr(hashForLeaf(bits("11111110"))), isLeaf: true},
+                                                       {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                                               },
+                                       },
+                               },
+                       },
+               },
+       }
+
+       tr.Delete(bits("11111110"))
+       tr.RootHash()
+       want := &Tree{
+               root: &node{
+                       key: bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(
+                               hashForLeaf(bits("11110000")),
+                               hashForNonLeaf(hashForLeaf(bits("11111100")), hashForLeaf(bits("11111111"))),
+                       )),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {
+                                       key:  bools("111111"),
+                                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111100")), hashForLeaf(bits("11111111")))),
+                                       children: [2]*node{
+                                               {key: bools("11111100"), hash: hashPtr(hashForLeaf(bits("11111100"))), isLeaf: true},
+                                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                                       },
+                               },
+                       },
+               },
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Delete(bits("11111100"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{
+                       key:  bools("1111"),
+                       hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11110000")), hashForLeaf(bits("11111111")))),
+                       children: [2]*node{
+                               {key: bools("11110000"), hash: hashPtr(hashForLeaf(bits("11110000"))), isLeaf: true},
+                               {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+                       },
+               },
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Delete(bits("11110011")) // nonexistent value
+       tr.RootHash()
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Delete(bits("11110000"))
+       tr.RootHash()
+       want = &Tree{
+               root: &node{key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+       }
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+
+       tr.Delete(bits("11111111"))
+       tr.RootHash()
+       want = &Tree{}
+       if !testutil.DeepEqual(tr.root, want.root) {
+               t.Fatalf("got:\n%swant:\n%s", pretty(tr), pretty(want))
+       }
+}
+
+// TestDeletePrefix checks that deleting a key equal to an interior
+// node's prefix (not a stored leaf) leaves the tree unchanged.
+func TestDeletePrefix(t *testing.T) {
+       root := &node{
+               key:  bools("111111"),
+               hash: hashPtr(hashForNonLeaf(hashForLeaf(bits("11111110")), hashForLeaf(bits("11111111")))),
+               children: [2]*node{
+                       {key: bools("11111110"), hash: hashPtr(hashForLeaf(bits("11111110"))), isLeaf: true},
+                       {key: bools("11111111"), hash: hashPtr(hashForLeaf(bits("11111111"))), isLeaf: true},
+               },
+       }
+
+       got := delete(root, bools("111111"))
+       got.calcHash()
+       if !testutil.DeepEqual(got, root) {
+               t.Fatalf("got:\n%swant:\n%s", prettyNode(got, 0), prettyNode(root, 0))
+       }
+}
+
+func TestBoolKey(t *testing.T) {
+       cases := []struct {
+               b []byte
+               w []uint8
+       }{{
+               b: nil,
+               w: []uint8{},
+       }, {
+               b: []byte{0x8f},
+               w: []uint8{1, 0, 0, 0, 1, 1, 1, 1},
+       }, {
+               b: []byte{0x81},
+               w: []uint8{1, 0, 0, 0, 0, 0, 0, 1},
+       }}
+
+       for _, c := range cases {
+               g := bitKey(c.b)
+
+               if !testutil.DeepEqual(g, c.w) {
+                       t.Errorf("Key(0x%x) = %v want %v", c.b, g, c.w)
+               }
+       }
+}
+
+func TestByteKey(t *testing.T) {
+       cases := []struct {
+               b []uint8
+               w []byte
+       }{{
+               b: []uint8{},
+               w: []byte{},
+       }, {
+               b: []uint8{1, 0, 0, 0, 1, 1, 1, 1},
+               w: []byte{0x8f},
+       }, {
+               b: []uint8{1, 0, 0, 0, 0, 0, 0, 1},
+               w: []byte{0x81},
+       }, {
+               b: []uint8{1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1},
+               w: []byte{0x81, 0x8f},
+       }}
+
+       for _, c := range cases {
+               g := byteKey(c.b)
+
+               if !testutil.DeepEqual(g, c.w) {
+                       t.Errorf("byteKey(%#v) = %x want %x", c.b, g, c.w)
+               }
+       }
+}
+
+func pretty(t *Tree) string {
+       if t.root == nil {
+               return ""
+       }
+       return prettyNode(t.root, 0)
+}
+
+// prettyNode renders n and its subtree for debugging, one node per
+// line, indented two spaces per level of depth.
+func prettyNode(n *node, depth int) string {
+       prettyStr := strings.Repeat("  ", depth)
+       if n == nil {
+               prettyStr += "nil\n"
+               return prettyStr
+       }
+       var b int
+       if len(n.key) > 31*8 {
+               // Keys built by bits() are 31 zero bytes plus one
+               // significant byte, so skip the all-zero bit prefix and
+               // show only the final byte's bits.
+               b = 31 * 8
+       }
+       prettyStr += fmt.Sprintf("key=%+v", n.key[b:])
+       if n.hash != nil {
+               prettyStr += fmt.Sprintf(" hash=%+v", n.hash)
+       }
+       prettyStr += "\n"
+
+       for _, c := range n.children {
+               if c != nil {
+                       prettyStr += prettyNode(c, depth+1)
+               }
+       }
+
+       return prettyStr
+}
+
// bits returns a 32-byte key whose last byte holds the value of the
// 8-bit binary literal lit; the leading 31 bytes are zero. The
// original version discarded the ParseUint error, silently mapping a
// malformed literal to 0; a bad fixture now panics instead.
func bits(lit string) []byte {
	n, err := strconv.ParseUint(lit, 2, 8)
	if err != nil {
		panic(err)
	}
	var b [31]byte
	return append(b[:], byte(n))
}
+
+func bools(lit string) []uint8 {
+       b := bitKey(bits(lit))
+       return append(b[:31*8], b[32*8-len(lit):]...)
+}
+
+func hashForLeaf(item []byte) bc.Hash {
+       return bc.NewHash(sha3.Sum256(append([]byte{0x00}, item...)))
+}
+
+func hashForNonLeaf(a, b bc.Hash) bc.Hash {
+       d := []byte{0x01}
+       d = append(d, a.Bytes()...)
+       d = append(d, b.Bytes()...)
+       return bc.NewHash(sha3.Sum256(d))
+}
+
+// hashPtr returns a pointer to a copy of h, for populating the
+// *bc.Hash field in node literals.
+func hashPtr(h bc.Hash) *bc.Hash {
+       return &h
+}
diff --git a/protocol/protocol.go b/protocol/protocol.go
new file mode 100644 (file)
index 0000000..c03fa2c
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+Package protocol provides the logic to tie together
+storage and validation for a Chain Protocol blockchain.
+
+This comprises all behavior that's common to every full
+node, as well as other functions that need to operate on the
+blockchain state.
+
+Here are a few examples of typical full node types.
+
+Generator
+
+A generator has two basic jobs: collecting transactions from
+other nodes and putting them into blocks.
+
+To add a new block to the blockchain, call GenerateBlock,
+sign the block (possibly collecting signatures from other
+parties), and call CommitBlock.
+
+Signer
+
+A signer validates blocks generated by the Generator and signs
+at most one block at each height.
+
+Participant
+
+A participant node in a network may select outputs for spending
+and compose transactions.
+
+To publish a new transaction, prepare your transaction
+(select outputs, and compose and sign the tx) and send the
+transaction to the network's generator. To wait for
+confirmation, call BlockWaiter on successive block heights
+and inspect the blockchain state until you find that the
+transaction has been either confirmed or rejected. Note
+that transactions may be malleable if there's no commitment
+to TXSIGHASH.
+
+To ingest a block, call ValidateBlock and CommitBlock.
+*/
+package protocol
+
+import (
+       "context"
+       "sync"
+       "time"
+
+       "github.com/golang/groupcache/lru"
+
+       "chain/errors"
+       "chain/log"
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/state"
+)
+
// maxCachedValidatedTxs is the max number of validated txs to cache.
const maxCachedValidatedTxs = 1000

var (
	// ErrTheDistantFuture is returned when waiting for a block height
	// too far in excess of the tip of the blockchain.
	ErrTheDistantFuture = errors.New("block height too far in future")
)
+
// Store provides storage for blockchain data: blocks and state tree
// snapshots.
//
// Note, this is different from a state snapshot. A state snapshot
// provides access to the state at a given point in time -- outputs
// and issuance memory. The Chain type uses Store to load state
// from storage and persist validated data.
type Store interface {
	// Height returns the height of the highest stored block.
	Height(context.Context) (uint64, error)
	// GetBlock returns the block stored at the given height.
	GetBlock(context.Context, uint64) (*legacy.Block, error)
	// LatestSnapshot returns the most recent state snapshot and the
	// height at which it was taken.
	LatestSnapshot(context.Context) (*state.Snapshot, uint64, error)

	// SaveBlock persists a validated block.
	SaveBlock(context.Context, *legacy.Block) error
	// FinalizeBlock marks the block at the given height as final.
	FinalizeBlock(context.Context, uint64) error
	// SaveSnapshot persists a state snapshot taken at the given height.
	SaveSnapshot(context.Context, uint64, *state.Snapshot) error
}
+
// Chain provides a complete, minimal blockchain database. It
// delegates the underlying storage to other objects, and uses
// validation logic from package validation to decide what
// objects can be safely stored.
type Chain struct {
	InitialBlockHash  bc.Hash       // hash of block 1; passed to transaction validation (see ValidateTx)
	MaxIssuanceWindow time.Duration // only used by generators

	// state holds the in-memory view of the chain tip, guarded by
	// state.cond.L; waiters block on the condition variable until
	// height reaches the value they need (see BlockWaiter).
	state struct {
		cond     sync.Cond // protects height, block, snapshot
		height   uint64
		block    *legacy.Block   // current only if leader
		snapshot *state.Snapshot // current only if leader
	}
	store Store

	// lastQueuedSnapshot records the timestamp of the block whose
	// snapshot was most recently queued (set during Recover).
	lastQueuedSnapshot time.Time
	// pendingSnapshots carries snapshots to the background saver
	// goroutine started in NewChain; buffer of 1 so a slow save
	// drops rather than blocks new snapshots.
	pendingSnapshots chan pendingSnapshot

	prevalidated prevalidatedTxsCache
}
+
// pendingSnapshot pairs a state snapshot with the height at which it was
// taken, for queueing to the background snapshot saver.
type pendingSnapshot struct {
	height   uint64
	snapshot *state.Snapshot
}
+
+// NewChain returns a new Chain using store as the underlying storage.
+func NewChain(ctx context.Context, initialBlockHash bc.Hash, store Store, heights <-chan uint64) (*Chain, error) {
+       c := &Chain{
+               InitialBlockHash: initialBlockHash,
+               store:            store,
+               pendingSnapshots: make(chan pendingSnapshot, 1),
+               prevalidated: prevalidatedTxsCache{
+                       lru: lru.New(maxCachedValidatedTxs),
+               },
+       }
+       c.state.cond.L = new(sync.Mutex)
+
+       var err error
+       c.state.height, err = store.Height(ctx)
+       if err != nil {
+               return nil, errors.Wrap(err, "looking up blockchain height")
+       }
+
+       // Note that c.height.n may still be zero here.
+       if heights != nil {
+               go func() {
+                       for h := range heights {
+                               c.setHeight(h)
+                       }
+               }()
+       }
+
+       go func() {
+               for {
+                       select {
+                       case <-ctx.Done():
+                               return
+                       case ps := <-c.pendingSnapshots:
+                               err = store.SaveSnapshot(ctx, ps.height, ps.snapshot)
+                               if err != nil {
+                                       log.Error(ctx, err, "at", "saving snapshot")
+                               }
+                       }
+               }
+       }()
+
+       return c, nil
+}
+
+// Height returns the current height of the blockchain.
+func (c *Chain) Height() uint64 {
+       c.state.cond.L.Lock()
+       defer c.state.cond.L.Unlock()
+       return c.state.height
+}
+
+// TimestampMS returns the latest known block timestamp.
+func (c *Chain) TimestampMS() uint64 {
+       c.state.cond.L.Lock()
+       defer c.state.cond.L.Unlock()
+       if c.state.block == nil {
+               return 0
+       }
+       return c.state.block.TimestampMS
+}
+
+// State returns the most recent state available. It will not be current
+// unless the current process is the leader. Callers should examine the
+// returned block header's height if they need to verify the current state.
+func (c *Chain) State() (*legacy.Block, *state.Snapshot) {
+       c.state.cond.L.Lock()
+       defer c.state.cond.L.Unlock()
+       return c.state.block, c.state.snapshot
+}
+
+func (c *Chain) setState(b *legacy.Block, s *state.Snapshot) {
+       c.state.cond.L.Lock()
+       defer c.state.cond.L.Unlock()
+       c.state.block = b
+       c.state.snapshot = s
+       if b != nil && b.Height > c.state.height {
+               c.state.height = b.Height
+               c.state.cond.Broadcast()
+       }
+}
+
+// BlockSoonWaiter returns a channel that
+// waits for the block at the given height,
+// but it is an error to wait for a block far in the future.
+// WaitForBlockSoon will timeout if the context times out.
+// To wait unconditionally, the caller should use WaitForBlock.
+func (c *Chain) BlockSoonWaiter(ctx context.Context, height uint64) <-chan error {
+       ch := make(chan error, 1)
+
+       go func() {
+               const slop = 3
+               if height > c.Height()+slop {
+                       ch <- ErrTheDistantFuture
+                       return
+               }
+
+               select {
+               case <-c.BlockWaiter(height):
+                       ch <- nil
+               case <-ctx.Done():
+                       ch <- ctx.Err()
+               }
+       }()
+
+       return ch
+}
+
+// BlockWaiter returns a channel that
+// waits for the block at the given height.
+func (c *Chain) BlockWaiter(height uint64) <-chan struct{} {
+       ch := make(chan struct{}, 1)
+       go func() {
+               c.state.cond.L.Lock()
+               defer c.state.cond.L.Unlock()
+               for c.state.height < height {
+                       c.state.cond.Wait()
+               }
+               ch <- struct{}{}
+       }()
+
+       return ch
+}
diff --git a/protocol/prottest/block.go b/protocol/prottest/block.go
new file mode 100644 (file)
index 0000000..0a5900e
--- /dev/null
@@ -0,0 +1,149 @@
+package prottest
+
+import (
+       "context"
+       "sync"
+       "testing"
+       "time"
+
+       "chain/crypto/ed25519"
+       "chain/protocol"
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/prottest/memstore"
+       "chain/protocol/state"
+       "chain/testutil"
+)
+
var (
	mutex         sync.Mutex // protects the following
	states        = make(map[*protocol.Chain]*state.Snapshot)      // latest state produced by MakeBlock, per chain
	blockPubkeys  = make(map[*protocol.Chain][]ed25519.PublicKey)  // block-signing public keys, per chain
	blockPrivkeys = make(map[*protocol.Chain][]ed25519.PrivateKey) // block-signing private keys, per chain
)
+
+type Option func(testing.TB, *config)
+
+func WithStore(store protocol.Store) Option {
+       return func(_ testing.TB, conf *config) { conf.store = store }
+}
+
// WithOutputIDs returns an Option that seeds the chain's initial state
// tree with the given output IDs, marking them as unspent.
func WithOutputIDs(outputIDs ...bc.Hash) Option {
	return func(_ testing.TB, conf *config) {
		for _, oid := range outputIDs {
			// NOTE(review): Insert's error is discarded here — presumably
			// it cannot fail for fresh IDs on an empty tree; confirm.
			conf.initialState.Tree.Insert(oid.Bytes())
		}
	}
}
+
+func WithBlockSigners(quorum, n int) Option {
+       return func(tb testing.TB, conf *config) {
+               conf.quorum = quorum
+               for i := 0; i < n; i++ {
+                       pubkey, privkey, err := ed25519.GenerateKey(nil)
+                       if err != nil {
+                               testutil.FatalErr(tb, err)
+                       }
+                       conf.pubkeys = append(conf.pubkeys, pubkey)
+                       conf.privkeys = append(conf.privkeys, privkey)
+               }
+       }
+}
+
// config collects the settings assembled by Options before NewChain
// builds the chain.
type config struct {
	store        protocol.Store       // block/snapshot storage; defaults to a memstore
	initialState *state.Snapshot      // state committed with the initial block
	pubkeys      []ed25519.PublicKey  // block-signing public keys
	privkeys     []ed25519.PrivateKey // block-signing private keys
	quorum       int                  // signatures required per block
}
+
// NewChain makes a new Chain. By default it uses a memstore for
// storage and creates an initial block using a 0/0 multisig program.
// It commits the initial block before returning the Chain.
//
// Its defaults may be overridden by providing Options.
func NewChain(tb testing.TB, opts ...Option) *protocol.Chain {
	conf := config{store: memstore.New(), initialState: state.Empty()}
	for _, opt := range opts {
		opt(tb, &conf)
	}

	ctx := context.Background()
	b1, err := protocol.NewInitialBlock(conf.pubkeys, conf.quorum, time.Now())
	if err != nil {
		testutil.FatalErr(tb, err)
	}
	c, err := protocol.NewChain(ctx, b1.Hash(), conf.store, nil)
	if err != nil {
		testutil.FatalErr(tb, err)
	}
	c.MaxIssuanceWindow = 48 * time.Hour // TODO(tessr): consider adding MaxIssuanceWindow to NewChain

	// Commit block 1 along with the (possibly Option-seeded) initial state.
	err = c.CommitAppliedBlock(ctx, b1, conf.initialState)
	if err != nil {
		testutil.FatalErr(tb, err)
	}

	// save block-signing keys in global state
	mutex.Lock()
	blockPubkeys[c] = conf.pubkeys
	blockPrivkeys[c] = conf.privkeys
	mutex.Unlock()

	return c
}
+
+// Initial returns the provided Chain's initial block.
+func Initial(tb testing.TB, c *protocol.Chain) *legacy.Block {
+       ctx := context.Background()
+       b1, err := c.GetBlock(ctx, 1)
+       if err != nil {
+               testutil.FatalErr(tb, err)
+       }
+       return b1
+}
+
+// BlockKeyPairs returns the configured block-signing key-pairs
+// for the provided Chain.
+func BlockKeyPairs(c *protocol.Chain) ([]ed25519.PublicKey, []ed25519.PrivateKey) {
+       mutex.Lock()
+       defer mutex.Unlock()
+       return blockPubkeys[c], blockPrivkeys[c]
+}
+
// MakeBlock makes a new block from txs, commits it, and returns it.
// It assumes c's consensus program requires 0 signatures.
// (This is true for chains returned by NewChain.)
// If c requires more than 0 signatures, MakeBlock will fail.
// MakeBlock always makes a block;
// if there are no transactions in txs,
// it makes an empty block.
func MakeBlock(tb testing.TB, c *protocol.Chain, txs []*legacy.Tx) *legacy.Block {
	ctx := context.Background()
	curBlock, err := c.GetBlock(ctx, c.Height())
	if err != nil {
		testutil.FatalErr(tb, err)
	}

	mutex.Lock()
	curState := states[c]
	mutex.Unlock()
	if curState == nil {
		// No prior MakeBlock for this chain in this process; start from
		// the empty state. NOTE(review): assumes c carries no earlier
		// non-empty state — true for chains built by NewChain.
		curState = state.Empty()
	}

	nextBlock, nextState, err := c.GenerateBlock(ctx, curBlock, curState, time.Now(), txs)
	if err != nil {
		testutil.FatalErr(tb, err)
	}
	err = c.CommitAppliedBlock(ctx, nextBlock, nextState)
	if err != nil {
		testutil.FatalErr(tb, err)
	}

	// Remember the post-block state for the next MakeBlock call.
	mutex.Lock()
	states[c] = nextState
	mutex.Unlock()
	return nextBlock
}
diff --git a/protocol/prottest/block_test.go b/protocol/prottest/block_test.go
new file mode 100644 (file)
index 0000000..95bbf66
--- /dev/null
@@ -0,0 +1,15 @@
+package prottest
+
+import "testing"
+
+func TestMakeBlock(t *testing.T) {
+       c := NewChain(t)
+       MakeBlock(t, c, nil)
+       MakeBlock(t, c, nil)
+       MakeBlock(t, c, nil)
+
+       var want uint64 = 4
+       if got := c.Height(); got != want {
+               t.Errorf("c.Height() = %d want %d", got, want)
+       }
+}
diff --git a/protocol/prottest/doc.go b/protocol/prottest/doc.go
new file mode 100644 (file)
index 0000000..1d673af
--- /dev/null
@@ -0,0 +1,2 @@
+// Package prottest provides utilities for Chain Protocol testing.
+package prottest
diff --git a/protocol/prottest/memstore/memstore.go b/protocol/prottest/memstore/memstore.go
new file mode 100644 (file)
index 0000000..6f91f92
--- /dev/null
@@ -0,0 +1,77 @@
// Package memstore provides MemStore, a Store implementation
// that keeps all blockchain state in memory.
+//
+// It is used in tests to avoid needing a database.
+package memstore
+
+import (
+       "context"
+       "fmt"
+       "sync"
+
+       "chain/protocol/bc/legacy"
+       "chain/protocol/state"
+)
+
// MemStore satisfies the Store interface.
type MemStore struct {
	mu          sync.Mutex               // guards all fields below
	Blocks      map[uint64]*legacy.Block // saved blocks, keyed by height
	State       *state.Snapshot          // most recently saved snapshot
	StateHeight uint64                   // height at which State was taken
}
+
+// New returns a new MemStore
+func New() *MemStore {
+       return &MemStore{Blocks: make(map[uint64]*legacy.Block)}
+}
+
+func (m *MemStore) Height(context.Context) (uint64, error) {
+       m.mu.Lock()
+       defer m.mu.Unlock()
+
+       return uint64(len(m.Blocks)), nil
+}
+
+func (m *MemStore) SaveBlock(ctx context.Context, b *legacy.Block) error {
+       m.mu.Lock()
+       defer m.mu.Unlock()
+
+       existing, ok := m.Blocks[b.Height]
+       if ok && existing.Hash() != b.Hash() {
+               return fmt.Errorf("already have a block at height %d", b.Height)
+       }
+       m.Blocks[b.Height] = b
+       return nil
+}
+
+func (m *MemStore) SaveSnapshot(ctx context.Context, height uint64, snapshot *state.Snapshot) error {
+       m.mu.Lock()
+       defer m.mu.Unlock()
+
+       m.State = state.Copy(snapshot)
+       m.StateHeight = height
+       return nil
+}
+
+func (m *MemStore) GetBlock(ctx context.Context, height uint64) (*legacy.Block, error) {
+       m.mu.Lock()
+       defer m.mu.Unlock()
+       b, ok := m.Blocks[height]
+       if !ok {
+               return nil, fmt.Errorf("memstore: no block at height %d", height)
+       }
+       return b, nil
+}
+
+func (m *MemStore) LatestSnapshot(context.Context) (*state.Snapshot, uint64, error) {
+       m.mu.Lock()
+       defer m.mu.Unlock()
+
+       if m.State == nil {
+               m.State = state.Empty()
+       }
+       return state.Copy(m.State), m.StateHeight, nil
+}
+
+func (m *MemStore) FinalizeBlock(context.Context, uint64) error { return nil }
diff --git a/protocol/recover.go b/protocol/recover.go
new file mode 100644 (file)
index 0000000..6d9aec7
--- /dev/null
@@ -0,0 +1,70 @@
+package protocol
+
+import (
+       "context"
+       "fmt"
+
+       "chain/errors"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/state"
+)
+
// Recover performs crash recovery, restoring the blockchain
// to a complete state. It returns the latest confirmed block
// and the corresponding state snapshot.
//
// If the blockchain is empty (missing initial block), this function
// returns a nil block and an empty snapshot.
func (c *Chain) Recover(ctx context.Context) (*legacy.Block, *state.Snapshot, error) {
	snapshot, snapshotHeight, err := c.store.LatestSnapshot(ctx)
	if err != nil {
		return nil, nil, errors.Wrap(err, "getting latest snapshot")
	}
	var b *legacy.Block
	if snapshotHeight > 0 {
		b, err = c.store.GetBlock(ctx, snapshotHeight)
		if err != nil {
			return nil, nil, errors.Wrap(err, "getting snapshot block")
		}
		// NOTE(review): presumably lastQueuedSnapshot is consulted by
		// snapshot-queueing logic elsewhere in the package — confirm.
		c.lastQueuedSnapshot = b.Time()
	}
	if snapshot == nil {
		snapshot = state.Empty()
	}

	// The true height of the blockchain might be higher than the
	// height at which the state snapshot was taken. Replay all
	// existing blocks higher than the snapshot height.
	height, err := c.store.Height(ctx)
	if err != nil {
		return nil, nil, errors.Wrap(err, "getting blockchain height")
	}

	// Bring the snapshot up to date with the latest block
	for h := snapshotHeight + 1; h <= height; h++ {
		b, err = c.store.GetBlock(ctx, h)
		if err != nil {
			return nil, nil, errors.Wrap(err, "getting block")
		}
		err = snapshot.ApplyBlock(legacy.MapBlock(b))
		if err != nil {
			return nil, nil, errors.Wrap(err, "applying block")
		}
		// Sanity check: replay must reproduce the state root each block
		// header commits to; a mismatch means corrupt storage or state.
		if b.AssetsMerkleRoot != snapshot.Tree.RootHash() {
			return nil, nil, fmt.Errorf("block %d has state root %x; snapshot has root %x",
				b.Height, b.AssetsMerkleRoot.Bytes(), snapshot.Tree.RootHash().Bytes())
		}
	}
	if b != nil {
		// All blocks before the latest one have been fully processed
		// (saved in the db, callbacks invoked). The last one may have
		// been too, but make sure just in case. Also "finalize" the last
		// block (notifying other processes of the latest block height)
		// and maybe persist the snapshot.
		err = c.CommitAppliedBlock(ctx, b, snapshot)
		if err != nil {
			return nil, nil, errors.Wrap(err, "committing block")
		}
	}
	return b, snapshot, nil
}
diff --git a/protocol/recover_test.go b/protocol/recover_test.go
new file mode 100644 (file)
index 0000000..7e51166
--- /dev/null
@@ -0,0 +1,79 @@
+package protocol
+
+import (
+       "context"
+       "log"
+       "testing"
+       "time"
+
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/prottest/memstore"
+       "chain/protocol/state"
+       "chain/testutil"
+)
+
+func TestRecoverSnapshotNoAdditionalBlocks(t *testing.T) {
+       store := memstore.New()
+       b, err := NewInitialBlock(nil, 0, time.Now().Add(-time.Minute))
+       if err != nil {
+               testutil.FatalErr(t, err)
+       }
+       c1, err := NewChain(context.Background(), b.Hash(), store, nil)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = c1.CommitAppliedBlock(context.Background(), b, state.Empty())
+       if err != nil {
+               testutil.FatalErr(t, err)
+       }
+
+       // Snapshots are applied asynchronously. This loops waits
+       // until the snapshot is created.
+       for {
+               _, height, _ := store.LatestSnapshot(context.Background())
+               if height > 0 {
+                       break
+               }
+       }
+
+       ctx := context.Background()
+
+       c2, err := NewChain(context.Background(), b.Hash(), store, nil)
+       if err != nil {
+               t.Fatal(err)
+       }
+       block, snapshot, err := c2.Recover(ctx)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if block.Height != 1 {
+               t.Fatalf("block.Height = %d, want %d", block.Height, 1)
+       }
+
+       err = c2.ValidateBlockForSig(ctx, createEmptyBlock(block, snapshot))
+       if err != nil {
+               t.Fatal(err)
+       }
+}
+
+func createEmptyBlock(block *legacy.Block, snapshot *state.Snapshot) *legacy.Block {
+       root, err := bc.MerkleRoot(nil)
+       if err != nil {
+               log.Fatalf("calculating empty merkle root: %s", err)
+       }
+
+       return &legacy.Block{
+               BlockHeader: legacy.BlockHeader{
+                       Version:           1,
+                       Height:            block.Height + 1,
+                       PreviousBlockHash: block.Hash(),
+                       TimestampMS:       bc.Millis(time.Now()),
+                       BlockCommitment: legacy.BlockCommitment{
+                               TransactionsMerkleRoot: root,
+                               AssetsMerkleRoot:       snapshot.Tree.RootHash(),
+                               ConsensusProgram:       block.ConsensusProgram,
+                       },
+               },
+       }
+}
diff --git a/protocol/state/snapshot.go b/protocol/state/snapshot.go
new file mode 100644 (file)
index 0000000..ed6f1df
--- /dev/null
@@ -0,0 +1,116 @@
+package state
+
+import (
+       "fmt"
+
+       "chain/errors"
+       "chain/protocol/bc"
+       "chain/protocol/patricia"
+)
+
// Snapshot encompasses a snapshot of entire blockchain state. It
// consists of a patricia state tree and the nonce set.
//
// Nonces maps a nonce entry's ID to the time (in Unix millis) at
// which it should expire from the nonce set.
//
// TODO: consider making type Snapshot truly immutable.  We already
// handle it that way in many places (with explicit calls to Copy to
// get the right behavior).  PruneNonces and the Apply functions would
// have to produce new Snapshots rather than updating Snapshots in
// place.
type Snapshot struct {
	// Tree is the patricia tree of unspent output IDs.
	Tree *patricia.Tree
	// Nonces maps nonce ID -> expiry time in Unix millis.
	Nonces map[bc.Hash]uint64
}
+
+// PruneNonces modifies a Snapshot, removing all nonce IDs with
+// expiration times earlier than the provided timestamp.
+func (s *Snapshot) PruneNonces(timestampMS uint64) {
+       for hash, expiryMS := range s.Nonces {
+               if timestampMS > expiryMS {
+                       delete(s.Nonces, hash)
+               }
+       }
+}
+
+// Copy makes a copy of provided snapshot. Copying a snapshot is an
+// O(n) operation where n is the number of nonces in the snapshot's
+// nonce set.
+func Copy(original *Snapshot) *Snapshot {
+       c := &Snapshot{
+               Tree:   new(patricia.Tree),
+               Nonces: make(map[bc.Hash]uint64, len(original.Nonces)),
+       }
+       *c.Tree = *original.Tree
+       for k, v := range original.Nonces {
+               c.Nonces[k] = v
+       }
+       return c
+}
+
+// Empty returns an empty state snapshot.
+func Empty() *Snapshot {
+       return &Snapshot{
+               Tree:   new(patricia.Tree),
+               Nonces: make(map[bc.Hash]uint64),
+       }
+}
+
+// ApplyBlock updates s in place.
+func (s *Snapshot) ApplyBlock(block *bc.Block) error {
+       s.PruneNonces(block.TimestampMs)
+       for i, tx := range block.Transactions {
+               err := s.ApplyTx(tx)
+               if err != nil {
+                       return errors.Wrapf(err, "applying block transaction %d", i)
+               }
+       }
+       return nil
+}
+
// ApplyTx updates s in place: it records tx's nonces, removes tx's
// spent previous outputs from the state tree, and inserts tx's newly
// created outputs. An error is returned (possibly leaving s partially
// updated) if a nonce conflicts or a prevout is missing.
func (s *Snapshot) ApplyTx(tx *bc.Tx) error {
	for _, n := range tx.NonceIDs {
		// Add new nonces. They must not conflict with nonces already
		// present.
		if _, ok := s.Nonces[n]; ok {
			return fmt.Errorf("conflicting nonce %x", n.Bytes())
		}

		nonce, err := tx.Nonce(n)
		if err != nil {
			return errors.Wrap(err, "applying nonce")
		}
		// The nonce expires at the max time of its associated time range.
		tr, err := tx.TimeRange(*nonce.TimeRangeId)
		if err != nil {
			return errors.Wrap(err, "applying nonce")
		}

		s.Nonces[n] = tr.MaxTimeMs
	}

	// Remove spent outputs. Each output must be present.
	for _, prevout := range tx.SpentOutputIDs {
		if !s.Tree.Contains(prevout.Bytes()) {
			return fmt.Errorf("invalid prevout %x", prevout.Bytes())
		}
		s.Tree.Delete(prevout.Bytes())
	}

	// Add new outputs. They must not yet be present.
	for _, id := range tx.TxHeader.ResultIds {
		// Ensure that this result is an output. It could be a retirement
		// which should not be inserted into the state tree.
		e := tx.Entries[*id]
		if _, ok := e.(*bc.Output); !ok {
			continue
		}

		err := s.Tree.Insert(id.Bytes())
		if err != nil {
			return err
		}
	}
	return nil
}
diff --git a/protocol/state/snapshot_test.go b/protocol/state/snapshot_test.go
new file mode 100644 (file)
index 0000000..aba6740
--- /dev/null
@@ -0,0 +1,107 @@
+package state
+
+import (
+       "reflect"
+       "testing"
+       "time"
+
+       "chain/protocol/bc"
+       "chain/protocol/bc/bctest"
+       "chain/protocol/bc/legacy"
+)
+
+func TestApplyTxSpend(t *testing.T) {
+       assetID := bc.AssetID{}
+       sourceID := bc.NewHash([32]byte{0x01, 0x02, 0x03})
+       sc := legacy.SpendCommitment{
+               AssetAmount:    bc.AssetAmount{AssetId: &assetID, Amount: 100},
+               SourceID:       sourceID,
+               SourcePosition: 0,
+               VMVersion:      1,
+               ControlProgram: nil,
+               RefDataHash:    bc.Hash{},
+       }
+       spentOutputID, err := legacy.ComputeOutputID(&sc)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       snap := Empty()
+       snap.Tree.Insert(spentOutputID.Bytes())
+
+       tx := legacy.MapTx(&legacy.TxData{
+               Version: 1,
+               Inputs: []*legacy.TxInput{
+                       legacy.NewSpendInput(nil, sourceID, assetID, 100, 0, nil, bc.Hash{}, nil),
+               },
+               Outputs: []*legacy.TxOutput{},
+       })
+
+       // Apply the spend transaction.
+       err = snap.ApplyTx(tx)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if snap.Tree.Contains(spentOutputID.Bytes()) {
+               t.Error("snapshot contains spent prevout")
+       }
+       err = snap.ApplyTx(tx)
+       if err == nil {
+               t.Error("expected error applying spend twice, got nil")
+       }
+}
+
+func TestApplyIssuanceTwice(t *testing.T) {
+       snap := Empty()
+       issuance := legacy.MapTx(&bctest.NewIssuanceTx(t, bc.EmptyStringHash).TxData)
+       err := snap.ApplyTx(issuance)
+       if err != nil {
+               t.Fatal(err)
+       }
+       err = snap.ApplyTx(issuance)
+       if err == nil {
+               t.Errorf("expected error for duplicate nonce, got %s", err)
+       }
+}
+
+func TestCopySnapshot(t *testing.T) {
+       snap := Empty()
+       err := snap.ApplyTx(legacy.MapTx(&bctest.NewIssuanceTx(t, bc.EmptyStringHash).TxData))
+       if err != nil {
+               t.Fatal(err)
+       }
+       dupe := Copy(snap)
+       if !reflect.DeepEqual(dupe, snap) {
+               t.Errorf("got %#v, want %#v", dupe, snap)
+       }
+}
+
// TestApplyBlock checks that applying a block prunes nonces whose
// expiry precedes the block timestamp.
func TestApplyBlock(t *testing.T) {
	// Setup a snapshot with a nonce with a known expiry.
	maxTime := bc.Millis(time.Now().Add(5 * time.Minute))
	issuance := bctest.NewIssuanceTx(t, bc.EmptyStringHash, func(tx *legacy.Tx) {
		tx.MaxTime = maxTime
	})
	snap := Empty()
	err := snap.ApplyTx(legacy.MapTx(&issuance.TxData))
	if err != nil {
		t.Fatal(err)
	}
	if n := len(snap.Nonces); n != 1 {
		t.Errorf("got %d nonces, want 1", n)
	}

	// Land a block later than the issuance's max time.
	block := &legacy.Block{
		BlockHeader: legacy.BlockHeader{
			TimestampMS: maxTime + 1,
		},
	}
	err = snap.ApplyBlock(legacy.MapBlock(block))
	if err != nil {
		t.Fatal(err)
	}
	// The expired nonce must have been pruned.
	if n := len(snap.Nonces); n != 0 {
		t.Errorf("got %d nonces, want 0", n)
	}
}
diff --git a/protocol/tx.go b/protocol/tx.go
new file mode 100644 (file)
index 0000000..8743a9f
--- /dev/null
@@ -0,0 +1,69 @@
+package protocol
+
+import (
+       "sync"
+
+       "github.com/golang/groupcache/lru"
+
+       "chain/errors"
+       "chain/protocol/bc"
+       "chain/protocol/validation"
+)
+
// ErrBadTx is returned for transactions failing validation.
var ErrBadTx = errors.New("invalid transaction")
+
+// ValidateTx validates the given transaction. A cache holds
+// per-transaction validation results and is consulted before
+// performing full validation.
+func (c *Chain) ValidateTx(tx *bc.Tx) error {
+       err := c.checkIssuanceWindow(tx)
+       if err != nil {
+               return err
+       }
+       var ok bool
+       err, ok = c.prevalidated.lookup(tx.ID)
+       if !ok {
+               err = validation.ValidateTx(tx, c.InitialBlockHash)
+               c.prevalidated.cache(tx.ID, err)
+       }
+       return errors.Sub(ErrBadTx, err)
+}
+
// prevalidatedTxsCache is a mutex-guarded LRU cache of per-transaction
// validation results, keyed by transaction ID. A nil cached value
// records a successful validation.
type prevalidatedTxsCache struct {
	mu  sync.Mutex // guards lru
	lru *lru.Cache
}
+
+func (c *prevalidatedTxsCache) lookup(txID bc.Hash) (err error, ok bool) {
+       c.mu.Lock()
+       v, ok := c.lru.Get(txID)
+       c.mu.Unlock()
+       if !ok {
+               return err, ok
+       }
+       if v == nil {
+               return nil, ok
+       }
+       return v.(error), ok
+}
+
+func (c *prevalidatedTxsCache) cache(txID bc.Hash, err error) {
+       c.mu.Lock()
+       c.lru.Add(txID, err)
+       c.mu.Unlock()
+}
+
+func (c *Chain) checkIssuanceWindow(tx *bc.Tx) error {
+       if c.MaxIssuanceWindow == 0 {
+               return nil
+       }
+       for _, entryID := range tx.InputIDs {
+               if _, err := tx.Issuance(entryID); err == nil {
+                       if tx.MinTimeMs+bc.DurationMillis(c.MaxIssuanceWindow) < tx.MaxTimeMs {
+                               return errors.WithDetailf(ErrBadTx, "issuance input's time window is larger than the network maximum (%s)", c.MaxIssuanceWindow)
+                       }
+               }
+       }
+       return nil
+}
diff --git a/protocol/tx_test.go b/protocol/tx_test.go
new file mode 100644 (file)
index 0000000..4a60625
--- /dev/null
@@ -0,0 +1,103 @@
+package protocol
+
+import (
+       "context"
+       "fmt"
+       "testing"
+       "time"
+
+       "golang.org/x/crypto/sha3"
+
+       "chain/crypto/ed25519"
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/state"
+       "chain/protocol/vm"
+       "chain/protocol/vm/vmutil"
+       "chain/testutil"
+)
+
+// TestBadMaxIssuanceWindow verifies that GenerateBlock excludes an
+// issuance transaction whose time window (one hour, from issue) is
+// wider than the chain's one-second MaxIssuanceWindow.
+func TestBadMaxIssuanceWindow(t *testing.T) {
+       ctx := context.Background()
+       c, b1 := newTestChain(t, time.Now())
+       c.MaxIssuanceWindow = time.Second
+
+       issueTx, _, _ := issue(t, nil, nil, 1)
+
+       got, _, err := c.GenerateBlock(ctx, b1, state.Empty(), time.Now(), []*legacy.Tx{issueTx})
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(got.Transactions) != 0 {
+               t.Error("expected issuance past max issuance window to be rejected")
+       }
+}
+
+// testDest is a test-only value destination backed by a single
+// ed25519 private key.
+type testDest struct {
+       privKey ed25519.PrivateKey
+}
+
+// newDest returns a testDest with a freshly generated ed25519 key pair.
+func newDest(t testing.TB) *testDest {
+       _, priv, err := ed25519.GenerateKey(nil)
+       if err != nil {
+               testutil.FatalErr(t, err)
+       }
+       return &testDest{
+               privKey: priv,
+       }
+}
+
+// sign satisfies the program guarding input index of tx: it assembles
+// a TXSIGHASH-checking program for the input's sighash, signs the
+// program's hash with the destination key, and installs the signature
+// and program as the input's witness arguments.
+func (d *testDest) sign(t testing.TB, tx *legacy.Tx, index uint32) {
+       txsighash := tx.SigHash(index)
+       prog, _ := vm.Assemble(fmt.Sprintf("0x%x TXSIGHASH EQUAL", txsighash.Bytes()))
+       h := sha3.Sum256(prog)
+       sig := ed25519.Sign(d.privKey, h[:])
+       tx.Inputs[index].SetArguments([][]byte{vm.Int64Bytes(0), sig, prog})
+}
+
+func (d testDest) controlProgram() ([]byte, error) {
+       pub := d.privKey.Public().(ed25519.PublicKey)
+       return vmutil.P2SPMultiSigProgram([]ed25519.PublicKey{pub}, 1)
+}
+
+// testAsset couples an asset ID with the testDest whose control
+// program doubles as the asset's issuance program.
+type testAsset struct {
+       bc.AssetID
+       testDest
+}
+
+// newAsset creates a test asset issuable by a fresh destination's
+// control program. The initial block ID is left at its zero value.
+func newAsset(t testing.TB) *testAsset {
+       dest := newDest(t)
+       cp, _ := dest.controlProgram()
+       var initialBlockID bc.Hash
+       assetID := bc.ComputeAssetID(cp, &initialBlockID, 1, &bc.EmptyStringHash)
+
+       return &testAsset{
+               AssetID:  assetID,
+               testDest: *dest,
+       }
+}
+
+// issue builds and signs a one-input, one-output issuance transaction
+// of amount units. A nil asset or dest is replaced with a freshly
+// generated one; the (possibly new) asset and dest are returned along
+// with the transaction.
+func issue(t testing.TB, asset *testAsset, dest *testDest, amount uint64) (*legacy.Tx, *testAsset, *testDest) {
+       if asset == nil {
+               asset = newAsset(t)
+       }
+       if dest == nil {
+               dest = newDest(t)
+       }
+       assetCP, _ := asset.controlProgram()
+       destCP, _ := dest.controlProgram()
+       // The tx time range spans one hour from now; tests relying on
+       // issuance-window limits depend on this width.
+       tx := legacy.NewTx(legacy.TxData{
+               Version: 1,
+               Inputs: []*legacy.TxInput{
+                       legacy.NewIssuanceInput([]byte{1}, amount, nil, bc.Hash{}, assetCP, nil, nil),
+               },
+               Outputs: []*legacy.TxOutput{
+                       legacy.NewTxOutput(asset.AssetID, amount, destCP, nil),
+               },
+               MinTime: bc.Millis(time.Now()),
+               MaxTime: bc.Millis(time.Now().Add(time.Hour)),
+       })
+       asset.sign(t, tx, 0)
+
+       return tx, asset, dest
+}
diff --git a/protocol/validation/block_test.go b/protocol/validation/block_test.go
new file mode 100644 (file)
index 0000000..1cbfbb7
--- /dev/null
@@ -0,0 +1,119 @@
+package validation
+
+import (
+       "testing"
+       "time"
+
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/vm"
+       "chain/protocol/vm/vmutil"
+)
+
+// TestValidateBlock1 checks that a well-formed initial block (height
+// 1, no previous block) passes ValidateBlock.
+func TestValidateBlock1(t *testing.T) {
+       b1 := newInitialBlock(t)
+       err := ValidateBlock(b1, nil, b1.ID, dummyValidateTx)
+       if err != nil {
+               t.Errorf("ValidateBlock(%v, nil) = %v, want nil", b1, err)
+       }
+}
+
+// TestValidateBlock1Err checks that an initial block with a corrupted
+// transactions root fails ValidateBlock.
+func TestValidateBlock1Err(t *testing.T) {
+       b1 := newInitialBlock(t)
+       transactionsRoot := bc.NewHash([32]byte{1})
+       b1.TransactionsRoot = &transactionsRoot // make b1 be invalid
+       err := ValidateBlock(b1, nil, b1.ID, dummyValidateTx)
+       if err == nil {
+               t.Errorf("ValidateBlock(%v, nil) = nil, want error", b1)
+       }
+}
+
+// TestValidateBlock2 checks that a block correctly chained onto the
+// initial block passes ValidateBlock.
+func TestValidateBlock2(t *testing.T) {
+       b1 := newInitialBlock(t)
+       b2 := generate(t, b1)
+       err := ValidateBlock(b2, b1, b2.ID, dummyValidateTx)
+       if err != nil {
+               t.Errorf("ValidateBlock(%v, %v) = %v, want nil", b2, b1, err)
+       }
+}
+
+// TestValidateBlock2Err checks that a chained block with a corrupted
+// transactions root fails ValidateBlock.
+func TestValidateBlock2Err(t *testing.T) {
+       b1 := newInitialBlock(t)
+       b2 := generate(t, b1)
+       transactionsRoot := bc.NewHash([32]byte{1})
+       b2.TransactionsRoot = &transactionsRoot // make b2 be invalid
+       err := ValidateBlock(b2, b1, b2.ID, dummyValidateTx)
+       if err == nil {
+               t.Errorf("ValidateBlock(%v, %v) = nil, want error", b2, b1)
+       }
+}
+
+// TestValidateBlockSig2 checks that a chained block satisfies the
+// previous block's next consensus program.
+func TestValidateBlockSig2(t *testing.T) {
+       b1 := newInitialBlock(t)
+       b2 := generate(t, b1)
+       err := ValidateBlockSig(b2, b1.NextConsensusProgram)
+       if err != nil {
+               t.Errorf("ValidateBlockSig(%v, %v) = %v, want nil", b2, b1, err)
+       }
+}
+
+// TestValidateBlockSig2Err checks that an always-false consensus
+// program causes ValidateBlockSig to fail.
+func TestValidateBlockSig2Err(t *testing.T) {
+       b1 := newInitialBlock(t)
+       b2 := generate(t, b1)
+       prog := []byte{byte(vm.OP_FALSE)} // make b2 be invalid
+       err := ValidateBlockSig(b2, prog)
+       if err == nil {
+               t.Errorf("ValidateBlockSig(%v, %v) = nil, want error", b2, b1)
+       }
+}
+
+// dummyValidateTx is a no-op transaction validator used by the block
+// tests, which exercise only header-level validation.
+func dummyValidateTx(*bc.Tx) error {
+       return nil
+}
+
+// newInitialBlock builds a minimal valid height-1 block: an empty
+// transaction set (so the merkle root is the empty root) guarded by a
+// 0-of-0 multisig consensus program, mapped into bc.Block form.
+func newInitialBlock(tb testing.TB) *bc.Block {
+       script, err := vmutil.BlockMultiSigProgram(nil, 0)
+       if err != nil {
+               tb.Fatal(err)
+       }
+
+       root, err := bc.MerkleRoot(nil) // calculate the zero value of the tx merkle root
+       if err != nil {
+               tb.Fatal(err)
+       }
+
+       b := &legacy.Block{
+               BlockHeader: legacy.BlockHeader{
+                       Version:     1,
+                       Height:      1,
+                       TimestampMS: bc.Millis(time.Now()),
+                       BlockCommitment: legacy.BlockCommitment{
+                               TransactionsMerkleRoot: root,
+                               ConsensusProgram:       script,
+                       },
+               },
+       }
+       return legacy.MapBlock(b)
+}
+
+// generate builds an empty block that validly extends prev: height
+// prev+1, timestamp one millisecond later, previous-block hash set,
+// and the consensus program carried forward from prev.
+func generate(tb testing.TB, prev *bc.Block) *bc.Block {
+       b := &legacy.Block{
+               BlockHeader: legacy.BlockHeader{
+                       Version:           1,
+                       Height:            prev.Height + 1,
+                       PreviousBlockHash: prev.ID,
+                       TimestampMS:       prev.TimestampMs + 1,
+                       BlockCommitment: legacy.BlockCommitment{
+                               ConsensusProgram: prev.NextConsensusProgram,
+                       },
+               },
+       }
+
+       var err error
+       // Empty transaction list: use the empty merkle root.
+       b.TransactionsMerkleRoot, err = bc.MerkleRoot(nil)
+       if err != nil {
+               tb.Fatal(err)
+       }
+
+       return legacy.MapBlock(b)
+}
diff --git a/protocol/validation/fuzz_test.go b/protocol/validation/fuzz_test.go
new file mode 100644 (file)
index 0000000..b489f8b
--- /dev/null
@@ -0,0 +1,29 @@
+package validation
+
+import (
+       "testing"
+
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+)
+
+// TestFuzzAssetIdNilPointer is a regression test for a fuzzer-found
+// input. Only a clean run matters: ValidateTx's return value is
+// deliberately ignored -- the test fails only if decoding or
+// validation panics (e.g. on a nil asset ID).
+func TestFuzzAssetIdNilPointer(t *testing.T) {
+       const (
+               blockchainID = `50935a092ffad7ec9fbac4f4486db6c3b8cd5b9f51cf697248584dde286a7220`
+               input        = `07300730303030303030000001302b3030303030303030303030303030303030303030303030303030303030303030303030303030303030303000253030303030303030303030303030303030303030303030303030303030303030303030303000`
+       )
+
+       var testBlockchainID bc.Hash
+       err := testBlockchainID.UnmarshalText([]byte(blockchainID))
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       var tx legacy.Tx
+       err = tx.UnmarshalText([]byte(input))
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       // Result intentionally discarded; see comment above.
+       ValidateTx(tx.Tx, testBlockchainID)
+}
diff --git a/protocol/validation/validation.go b/protocol/validation/validation.go
new file mode 100644 (file)
index 0000000..4073b20
--- /dev/null
@@ -0,0 +1,527 @@
+package validation
+
+import (
+       "fmt"
+
+       "chain/errors"
+       "chain/math/checked"
+       "chain/protocol/bc"
+       "chain/protocol/vm"
+)
+
+// validationState contains the context that must propagate through
+// the transaction graph when validating entries.
+// validationState contains the context that must propagate through
+// the transaction graph when validating entries.
+//
+// States are copied by value (vs2 := *vs) when descending into child
+// entries, so positional fields changed in a child do not leak back
+// into the parent; the cache map, being a reference type, is shared
+// by all copies within one validation run.
+type validationState struct {
+       // The ID of the blockchain
+       blockchainID bc.Hash
+
+       // The enclosing transaction object
+       tx *bc.Tx
+
+       // The ID of the nearest enclosing entry
+       entryID bc.Hash
+
+       // The source position, for validating ValueSources
+       sourcePos uint64
+
+       // The destination position, for validating ValueDestinations
+       destPos uint64
+
+       // Memoized per-entry validation results
+       cache map[bc.Hash]error
+}
+
+// Sentinel errors returned (usually wrapped with detail) by the
+// validation functions in this package.
+var (
+       errBadTimeRange          = errors.New("bad time range")
+       errEmptyResults          = errors.New("transaction has no results")
+       errMismatchedAssetID     = errors.New("mismatched asset id")
+       errMismatchedBlock       = errors.New("mismatched block")
+       errMismatchedMerkleRoot  = errors.New("mismatched merkle root")
+       errMismatchedPosition    = errors.New("mismatched value source/dest positions")
+       errMismatchedReference   = errors.New("mismatched reference")
+       errMismatchedValue       = errors.New("mismatched value")
+       errMisorderedBlockHeight = errors.New("misordered block height")
+       errMisorderedBlockTime   = errors.New("misordered block time")
+       errMissingField          = errors.New("missing required field")
+       errNoPrevBlock           = errors.New("no previous block")
+       errNoSource              = errors.New("no source for value")
+       errNonemptyExtHash       = errors.New("non-empty extension hash")
+       errOverflow              = errors.New("arithmetic overflow/underflow")
+       errPosition              = errors.New("invalid source or destination position")
+       errTxVersion             = errors.New("invalid transaction version")
+       errUnbalanced            = errors.New("unbalanced")
+       errUntimelyTransaction   = errors.New("block timestamp outside transaction time range")
+       errVersionRegression     = errors.New("version regression")
+       errWrongBlockchain       = errors.New("wrong blockchain")
+       errZeroTime              = errors.New("timerange has one or two bounds set to zero")
+)
+
+// checkValid validates entry e (whose ID within the transaction is
+// vs.entryID), dispatching on its concrete type and recursing into
+// referenced entries. Results are memoized in vs.cache, which is
+// shared by all validationState copies of one run; the named return
+// err lets the deferred function record whatever result is returned.
+func checkValid(vs *validationState, e bc.Entry) (err error) {
+       entryID := bc.EntryID(e)
+       var ok bool
+       if err, ok = vs.cache[entryID]; ok {
+               return err
+       }
+
+       defer func() {
+               vs.cache[entryID] = err
+       }()
+
+       switch e := e.(type) {
+       case *bc.TxHeader:
+               // This does only part of the work of validating a tx header. The
+               // block-related parts of tx validation are in ValidateBlock.
+               if e.MaxTimeMs > 0 {
+                       if e.MaxTimeMs < e.MinTimeMs {
+                               return errors.WithDetailf(errBadTimeRange, "min time %d, max time %d", e.MinTimeMs, e.MaxTimeMs)
+                       }
+               }
+
+               for i, resID := range e.ResultIds {
+                       resultEntry := vs.tx.Entries[*resID]
+                       vs2 := *vs
+                       vs2.entryID = *resID
+                       err = checkValid(&vs2, resultEntry)
+                       if err != nil {
+                               return errors.Wrapf(err, "checking result %d", i)
+                       }
+               }
+
+               if e.Version == 1 {
+                       if len(e.ResultIds) == 0 {
+                               return errEmptyResults
+                       }
+
+                       if e.ExtHash != nil && !e.ExtHash.IsZero() {
+                               return errNonemptyExtHash
+                       }
+               }
+
+       case *bc.Mux:
+               err = vm.Verify(NewTxVMContext(vs.tx, e, e.Program, e.WitnessArguments))
+               if err != nil {
+                       return errors.Wrap(err, "checking mux program")
+               }
+
+               for i, src := range e.Sources {
+                       vs2 := *vs
+                       vs2.sourcePos = uint64(i)
+                       err = checkValidSrc(&vs2, src)
+                       if err != nil {
+                               return errors.Wrapf(err, "checking mux source %d", i)
+                       }
+               }
+               for i, dest := range e.WitnessDestinations {
+                       vs2 := *vs
+                       vs2.destPos = uint64(i)
+                       err = checkValidDest(&vs2, dest)
+                       if err != nil {
+                               return errors.Wrapf(err, "checking mux destination %d", i)
+                       }
+               }
+
+               // Per-asset balance check: sources add, destinations
+               // subtract, and every asset must net to zero. checked
+               // arithmetic guards against int64 overflow/underflow.
+               parity := make(map[bc.AssetID]int64)
+               for i, src := range e.Sources {
+                       sum, ok := checked.AddInt64(parity[*src.Value.AssetId], int64(src.Value.Amount))
+                       if !ok {
+                               return errors.WithDetailf(errOverflow, "adding %d units of asset %x from mux source %d to total %d overflows int64", src.Value.Amount, src.Value.AssetId.Bytes(), i, parity[*src.Value.AssetId])
+                       }
+                       parity[*src.Value.AssetId] = sum
+               }
+
+               for i, dest := range e.WitnessDestinations {
+                       sum, ok := parity[*dest.Value.AssetId]
+                       if !ok {
+                               return errors.WithDetailf(errNoSource, "mux destination %d, asset %x, has no corresponding source", i, dest.Value.AssetId.Bytes())
+                       }
+
+                       diff, ok := checked.SubInt64(sum, int64(dest.Value.Amount))
+                       if !ok {
+                               return errors.WithDetailf(errOverflow, "subtracting %d units of asset %x from mux destination %d from total %d underflows int64", dest.Value.Amount, dest.Value.AssetId.Bytes(), i, sum)
+                       }
+                       parity[*dest.Value.AssetId] = diff
+               }
+
+               for assetID, amount := range parity {
+                       if amount != 0 {
+                               return errors.WithDetailf(errUnbalanced, "asset %x sources - destinations = %d (should be 0)", assetID.Bytes(), amount)
+                       }
+               }
+
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       case *bc.Nonce:
+               err = vm.Verify(NewTxVMContext(vs.tx, e, e.Program, e.WitnessArguments))
+               if err != nil {
+                       return errors.Wrap(err, "checking nonce program")
+               }
+               tr, err := vs.tx.TimeRange(*e.TimeRangeId)
+               if err != nil {
+                       return errors.Wrap(err, "getting nonce timerange")
+               }
+               vs2 := *vs
+               vs2.entryID = *e.TimeRangeId
+               err = checkValid(&vs2, tr)
+               if err != nil {
+                       return errors.Wrap(err, "checking nonce timerange")
+               }
+
+               // A nonce must be bounded on both sides so it can expire.
+               if tr.MinTimeMs == 0 || tr.MaxTimeMs == 0 {
+                       return errZeroTime
+               }
+
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       case *bc.Output:
+               vs2 := *vs
+               vs2.sourcePos = 0
+               err = checkValidSrc(&vs2, e.Source)
+               if err != nil {
+                       return errors.Wrap(err, "checking output source")
+               }
+
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       case *bc.Retirement:
+               vs2 := *vs
+               vs2.sourcePos = 0
+               err = checkValidSrc(&vs2, e.Source)
+               if err != nil {
+                       return errors.Wrap(err, "checking retirement source")
+               }
+
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       case *bc.TimeRange:
+               // The entry's range must contain the tx's range.
+               if e.MinTimeMs > vs.tx.MinTimeMs {
+                       return errBadTimeRange
+               }
+               if e.MaxTimeMs > 0 && e.MaxTimeMs < vs.tx.MaxTimeMs {
+                       return errBadTimeRange
+               }
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       case *bc.Issuance:
+               if *e.WitnessAssetDefinition.InitialBlockId != vs.blockchainID {
+                       return errors.WithDetailf(errWrongBlockchain, "current blockchain %x, asset defined on blockchain %x", vs.blockchainID.Bytes(), e.WitnessAssetDefinition.InitialBlockId.Bytes())
+               }
+
+               computedAssetID := e.WitnessAssetDefinition.ComputeAssetID()
+               if computedAssetID != *e.Value.AssetId {
+                       return errors.WithDetailf(errMismatchedAssetID, "asset ID is %x, issuance wants %x", computedAssetID.Bytes(), e.Value.AssetId.Bytes())
+               }
+
+               anchor, ok := vs.tx.Entries[*e.AnchorId]
+               if !ok {
+                       return errors.Wrapf(bc.ErrMissingEntry, "entry for issuance anchor %x not found", e.AnchorId.Bytes())
+               }
+
+               err = vm.Verify(NewTxVMContext(vs.tx, e, e.WitnessAssetDefinition.IssuanceProgram, e.WitnessArguments))
+               if err != nil {
+                       return errors.Wrap(err, "checking issuance program")
+               }
+
+               // The anchor must be an entry type that anchors issuances,
+               // and it must point back at this issuance.
+               var anchored *bc.Hash
+               switch a := anchor.(type) {
+               case *bc.Nonce:
+                       anchored = a.WitnessAnchoredId
+
+               case *bc.Spend:
+                       anchored = a.WitnessAnchoredId
+
+               case *bc.Issuance:
+                       anchored = a.WitnessAnchoredId
+
+               default:
+                       return errors.WithDetailf(bc.ErrEntryType, "issuance anchor has type %T, should be nonce, spend, or issuance", anchor)
+               }
+
+               if *anchored != vs.entryID {
+                       return errors.WithDetailf(errMismatchedReference, "issuance %x anchor is for %x", vs.entryID.Bytes(), anchored.Bytes())
+               }
+
+               anchorVS := *vs
+               anchorVS.entryID = *e.AnchorId
+               err = checkValid(&anchorVS, anchor)
+               if err != nil {
+                       return errors.Wrap(err, "checking issuance anchor")
+               }
+
+               destVS := *vs
+               destVS.destPos = 0
+               err = checkValidDest(&destVS, e.WitnessDestination)
+               if err != nil {
+                       return errors.Wrap(err, "checking issuance destination")
+               }
+
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       case *bc.Spend:
+               if e.SpentOutputId == nil {
+                       return errors.Wrap(errMissingField, "spend without spent output ID")
+               }
+               spentOutput, err := vs.tx.Output(*e.SpentOutputId)
+               if err != nil {
+                       return errors.Wrap(err, "getting spend prevout")
+               }
+               err = vm.Verify(NewTxVMContext(vs.tx, e, spentOutput.ControlProgram, e.WitnessArguments))
+               if err != nil {
+                       return errors.Wrap(err, "checking control program")
+               }
+
+               // The value claimed by the spend must match the value
+               // locked in the previous output exactly.
+               eq, err := spentOutput.Source.Value.Equal(e.WitnessDestination.Value)
+               if err != nil {
+                       return err
+               }
+               if !eq {
+                       return errors.WithDetailf(
+                               errMismatchedValue,
+                               "previous output is for %d unit(s) of %x, spend wants %d unit(s) of %x",
+                               spentOutput.Source.Value.Amount,
+                               spentOutput.Source.Value.AssetId.Bytes(),
+                               e.WitnessDestination.Value.Amount,
+                               e.WitnessDestination.Value.AssetId.Bytes(),
+                       )
+               }
+
+               vs2 := *vs
+               vs2.destPos = 0
+               err = checkValidDest(&vs2, e.WitnessDestination)
+               if err != nil {
+                       return errors.Wrap(err, "checking spend destination")
+               }
+
+               if vs.tx.Version == 1 && e.ExtHash != nil && !e.ExtHash.IsZero() {
+                       return errNonemptyExtHash
+               }
+
+       default:
+               return fmt.Errorf("entry has unexpected type %T", e)
+       }
+
+       return nil
+}
+
+// checkValidBlockHeader performs standalone header checks: a version-1
+// header must have an empty extension hash.
+func checkValidBlockHeader(bh *bc.BlockHeader) error {
+       if bh.Version == 1 && bh.ExtHash != nil && !bh.ExtHash.IsZero() {
+               return errNonemptyExtHash
+       }
+       return nil
+}
+
+// checkValidSrc validates a ValueSource of the entry identified by
+// vstate.entryID at position vstate.sourcePos: it recursively
+// validates the referenced upstream entry and verifies that the
+// upstream entry's corresponding destination points back at this
+// entry with matching position and value.
+func checkValidSrc(vstate *validationState, vs *bc.ValueSource) error {
+       if vs == nil {
+               return errors.Wrap(errMissingField, "empty value source")
+       }
+       if vs.Ref == nil {
+               return errors.Wrap(errMissingField, "missing ref on value source")
+       }
+       if vs.Value == nil || vs.Value.AssetId == nil {
+               return errors.Wrap(errMissingField, "missing value on value source")
+       }
+
+       e, ok := vstate.tx.Entries[*vs.Ref]
+       if !ok {
+               return errors.Wrapf(bc.ErrMissingEntry, "entry for value source %x not found", vs.Ref.Bytes())
+       }
+       vstate2 := *vstate
+       vstate2.entryID = *vs.Ref
+       err := checkValid(&vstate2, e)
+       if err != nil {
+               return errors.Wrap(err, "checking value source")
+       }
+
+       // Locate the destination on the upstream entry that should
+       // correspond to this source. Only issuances, spends, and muxes
+       // can act as value sources.
+       var dest *bc.ValueDestination
+       switch ref := e.(type) {
+       case *bc.Issuance:
+               if vs.Position != 0 {
+                       return errors.Wrapf(errPosition, "invalid position %d for issuance source", vs.Position)
+               }
+               dest = ref.WitnessDestination
+
+       case *bc.Spend:
+               if vs.Position != 0 {
+                       return errors.Wrapf(errPosition, "invalid position %d for spend source", vs.Position)
+               }
+               dest = ref.WitnessDestination
+
+       case *bc.Mux:
+               if vs.Position >= uint64(len(ref.WitnessDestinations)) {
+                       return errors.Wrapf(errPosition, "invalid position %d for %d-destination mux source", vs.Position, len(ref.WitnessDestinations))
+               }
+               dest = ref.WitnessDestinations[vs.Position]
+
+       default:
+               return errors.Wrapf(bc.ErrEntryType, "value source is %T, should be issuance, spend, or mux", e)
+       }
+
+       // The upstream destination must agree on referent, position,
+       // and value -- the two halves of one value flow.
+       if dest.Ref == nil || *dest.Ref != vstate.entryID {
+               return errors.Wrapf(errMismatchedReference, "value source for %x has disagreeing destination %x", vstate.entryID.Bytes(), dest.Ref.Bytes())
+       }
+
+       if dest.Position != vstate.sourcePos {
+               return errors.Wrapf(errMismatchedPosition, "value source position %d disagrees with %d", dest.Position, vstate.sourcePos)
+       }
+
+       eq, err := dest.Value.Equal(vs.Value)
+       if err != nil {
+               return errors.Sub(errMissingField, err)
+       }
+       if !eq {
+               return errors.Wrapf(errMismatchedValue, "source value %v disagrees with %v", dest.Value, vs.Value)
+       }
+
+       return nil
+}
+
+// checkValidDest validates a ValueDestination of the entry identified
+// by vs.entryID at position vs.destPos: the referenced downstream
+// entry's corresponding source must point back at this entry with
+// matching position and value. Unlike checkValidSrc, the referenced
+// entry is not recursively validated here; downstream entries are
+// presumably reached via the tx header's result IDs -- confirm.
+func checkValidDest(vs *validationState, vd *bc.ValueDestination) error {
+       if vd == nil {
+               return errors.Wrap(errMissingField, "empty value destination")
+       }
+       if vd.Ref == nil {
+               return errors.Wrap(errMissingField, "missing ref on value destination")
+       }
+       if vd.Value == nil || vd.Value.AssetId == nil {
+               return errors.Wrap(errMissingField, "missing value on value source")
+       }
+
+       e, ok := vs.tx.Entries[*vd.Ref]
+       if !ok {
+               return errors.Wrapf(bc.ErrMissingEntry, "entry for value destination %x not found", vd.Ref.Bytes())
+       }
+       // Only outputs, retirements, and muxes can receive value.
+       var src *bc.ValueSource
+       switch ref := e.(type) {
+       case *bc.Output:
+               if vd.Position != 0 {
+                       return errors.Wrapf(errPosition, "invalid position %d for output destination", vd.Position)
+               }
+               src = ref.Source
+
+       case *bc.Retirement:
+               if vd.Position != 0 {
+                       return errors.Wrapf(errPosition, "invalid position %d for retirement destination", vd.Position)
+               }
+               src = ref.Source
+
+       case *bc.Mux:
+               if vd.Position >= uint64(len(ref.Sources)) {
+                       return errors.Wrapf(errPosition, "invalid position %d for %d-source mux destination", vd.Position, len(ref.Sources))
+               }
+               src = ref.Sources[vd.Position]
+
+       default:
+               return errors.Wrapf(bc.ErrEntryType, "value destination is %T, should be output, retirement, or mux", e)
+       }
+
+       // The downstream source must agree on referent, position, and
+       // value -- the two halves of one value flow.
+       if src.Ref == nil || *src.Ref != vs.entryID {
+               return errors.Wrapf(errMismatchedReference, "value destination for %x has disagreeing source %x", vs.entryID.Bytes(), src.Ref.Bytes())
+       }
+
+       if src.Position != vs.destPos {
+               return errors.Wrapf(errMismatchedPosition, "value destination position %d disagrees with %d", src.Position, vs.destPos)
+       }
+
+       eq, err := src.Value.Equal(vd.Value)
+       if err != nil {
+               return errors.Sub(errMissingField, err)
+       }
+       if !eq {
+               return errors.Wrapf(errMismatchedValue, "destination value %v disagrees with %v", src.Value, vd.Value)
+       }
+
+       return nil
+}
+
+// ValidateBlockSig runs the consensus program prog on b.
+// prog is typically the previous block's next consensus program; a
+// non-nil error means b's witness arguments did not satisfy it.
+func ValidateBlockSig(b *bc.Block, prog []byte) error {
+       vmContext := newBlockVMContext(b, prog, b.WitnessArguments)
+       err := vm.Verify(vmContext)
+       return errors.Wrap(err, "evaluating previous block's next consensus program")
+}
+
+// ValidateBlock validates a block and the transactions within.
+// It does not run the consensus program; for that, see ValidateBlockSig.
+//
+// prev may be nil only for the initial block (height 1).
+//
+// NOTE(review): initialBlockID is accepted but never used in this
+// function -- confirm whether blockchain-ID checks are intentionally
+// delegated to the validateTx callback.
+func ValidateBlock(b, prev *bc.Block, initialBlockID bc.Hash, validateTx func(*bc.Tx) error) error {
+       if b.Height > 1 {
+               if prev == nil {
+                       return errors.WithDetailf(errNoPrevBlock, "height %d", b.Height)
+               }
+               err := validateBlockAgainstPrev(b, prev)
+               if err != nil {
+                       return err
+               }
+       }
+
+       err := checkValidBlockHeader(b.BlockHeader)
+       if err != nil {
+               return errors.Wrap(err, "checking block header")
+       }
+
+       for i, tx := range b.Transactions {
+               if b.Version == 1 && tx.Version != 1 {
+                       return errors.WithDetailf(errTxVersion, "block version %d, transaction version %d", b.Version, tx.Version)
+               }
+               // The block timestamp must fall within each transaction's
+               // [MinTimeMs, MaxTimeMs] window (zero bounds are open).
+               if tx.MaxTimeMs > 0 && b.TimestampMs > tx.MaxTimeMs {
+                       return errors.WithDetailf(errUntimelyTransaction, "block timestamp %d, transaction time range %d-%d", b.TimestampMs, tx.MinTimeMs, tx.MaxTimeMs)
+               }
+               if tx.MinTimeMs > 0 && b.TimestampMs > 0 && b.TimestampMs < tx.MinTimeMs {
+                       return errors.WithDetailf(errUntimelyTransaction, "block timestamp %d, transaction time range %d-%d", b.TimestampMs, tx.MinTimeMs, tx.MaxTimeMs)
+               }
+
+               err = validateTx(tx)
+               if err != nil {
+                       return errors.Wrapf(err, "validity of transaction %d of %d", i, len(b.Transactions))
+               }
+       }
+
+       // The header's transactions root must commit to exactly the
+       // transactions carried in the block.
+       txRoot, err := bc.MerkleRoot(b.Transactions)
+       if err != nil {
+               return errors.Wrap(err, "computing transaction merkle root")
+       }
+
+       if txRoot != *b.TransactionsRoot {
+               return errors.WithDetailf(errMismatchedMerkleRoot, "computed %x, current block wants %x", txRoot.Bytes(), b.TransactionsRoot.Bytes())
+       }
+
+       return nil
+}
+
+func validateBlockAgainstPrev(b, prev *bc.Block) error {
+       if b.Version < prev.Version {
+               return errors.WithDetailf(errVersionRegression, "previous block verson %d, current block version %d", prev.Version, b.Version)
+       }
+       if b.Height != prev.Height+1 {
+               return errors.WithDetailf(errMisorderedBlockHeight, "previous block height %d, current block height %d", prev.Height, b.Height)
+       }
+       if prev.ID != *b.PreviousBlockId {
+               return errors.WithDetailf(errMismatchedBlock, "previous block ID %x, current block wants %x", prev.ID.Bytes(), b.PreviousBlockId.Bytes())
+       }
+       if b.TimestampMs <= prev.TimestampMs {
+               return errors.WithDetailf(errMisorderedBlockTime, "previous block time %d, current block time %d", prev.TimestampMs, b.TimestampMs)
+       }
+       return nil
+}
+
+// ValidateTx validates a transaction.
+// initialBlockID identifies the blockchain; issuances defined on a
+// different blockchain are rejected. Validation starts at the tx
+// header and recurses through referenced entries via checkValid,
+// memoizing per-entry results in a fresh cache for this call.
+func ValidateTx(tx *bc.Tx, initialBlockID bc.Hash) error {
+       vs := &validationState{
+               blockchainID: initialBlockID,
+               tx:           tx,
+               entryID:      tx.ID,
+
+               cache: make(map[bc.Hash]error),
+       }
+       return checkValid(vs, tx.TxHeader)
+}
diff --git a/protocol/validation/validation_test.go b/protocol/validation/validation_test.go
new file mode 100644 (file)
index 0000000..0fa728f
--- /dev/null
@@ -0,0 +1,582 @@
+package validation
+
+import (
+       "fmt"
+       "math"
+       "testing"
+       "time"
+
+       "chain/crypto/sha3pool"
+       "chain/errors"
+       "chain/protocol/bc"
+       "chain/protocol/bc/bctest"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/vm"
+       "chain/testutil"
+
+       "github.com/davecgh/go-spew/spew"
+       "github.com/golang/protobuf/proto"
+)
+
+// init configures go-spew dumps (used in test-failure messages below)
+// to print raw struct contents rather than invoking methods such as
+// String, which keeps the dumped validation state literal.
+func init() {
+       spew.Config.DisableMethods = true
+}
+
+// TestTxValidation builds a fresh valid sample transaction for each
+// case, applies the case's mutation function (c.f) to the tx, its mux
+// entry, or the validation state, and asserts that checkValid returns
+// the expected root error (nil for the valid/"that's OK" cases).
+func TestTxValidation(t *testing.T) {
+       var (
+               tx      *bc.Tx
+               vs      *validationState
+               fixture *txFixture
+
+               // the mux from tx, pulled out for convenience
+               mux *bc.Mux
+       )
+
+       cases := []struct {
+               desc string // description of the test case
+               f    func() // function to adjust tx, vs, and/or mux
+               err  error  // expected error
+       }{
+               {
+                       desc: "base case",
+               },
+               {
+                       desc: "failing mux program",
+                       f: func() {
+                               mux.Program.Code = []byte{byte(vm.OP_FALSE)}
+                       },
+                       err: vm.ErrFalseVMResult,
+               },
+               {
+                       desc: "unbalanced mux amounts",
+                       f: func() {
+                               mux.Sources[0].Value.Amount++
+                               iss := tx.Entries[*mux.Sources[0].Ref].(*bc.Issuance)
+                               iss.WitnessDestination.Value.Amount++
+                       },
+                       err: errUnbalanced,
+               },
+               {
+                       desc: "overflowing mux source amounts",
+                       f: func() {
+                               mux.Sources[0].Value.Amount = math.MaxInt64
+                               iss := tx.Entries[*mux.Sources[0].Ref].(*bc.Issuance)
+                               iss.WitnessDestination.Value.Amount = math.MaxInt64
+                       },
+                       err: errOverflow,
+               },
+               {
+                       desc: "underflowing mux destination amounts",
+                       f: func() {
+                               mux.WitnessDestinations[0].Value.Amount = math.MaxInt64
+                               out := tx.Entries[*mux.WitnessDestinations[0].Ref].(*bc.Output)
+                               out.Source.Value.Amount = math.MaxInt64
+                               mux.WitnessDestinations[1].Value.Amount = math.MaxInt64
+                               out = tx.Entries[*mux.WitnessDestinations[1].Ref].(*bc.Output)
+                               out.Source.Value.Amount = math.MaxInt64
+                       },
+                       err: errOverflow,
+               },
+               {
+                       desc: "unbalanced mux assets",
+                       f: func() {
+                               mux.Sources[1].Value.AssetId = newAssetID(255)
+                               sp := tx.Entries[*mux.Sources[1].Ref].(*bc.Spend)
+                               sp.WitnessDestination.Value.AssetId = newAssetID(255)
+                       },
+                       err: errUnbalanced,
+               },
+               {
+                       desc: "nonempty mux exthash",
+                       f: func() {
+                               mux.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "nonempty mux exthash, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               mux.ExtHash = newHash(1)
+                       },
+               },
+               {
+                       desc: "failing nonce program",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               nonce.Program.Code = []byte{byte(vm.OP_FALSE)}
+                       },
+                       err: vm.ErrFalseVMResult,
+               },
+               {
+                       desc: "nonce exthash nonempty",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               nonce.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "nonce exthash nonempty, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               nonce.ExtHash = newHash(1)
+                       },
+               },
+               {
+                       desc: "nonce timerange misordered",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
+                               tr.MinTimeMs = tr.MaxTimeMs + 1
+                       },
+                       err: errBadTimeRange,
+               },
+               {
+                       desc: "nonce timerange disagrees with tx timerange",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
+                               tr.MaxTimeMs = tx.MaxTimeMs - 1
+                       },
+                       err: errBadTimeRange,
+               },
+               {
+                       desc: "nonce timerange exthash nonempty",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
+                               tr.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "nonce timerange exthash nonempty, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               iss := txIssuance(t, tx, 0)
+                               nonce := tx.Entries[*iss.AnchorId].(*bc.Nonce)
+                               tr := tx.Entries[*nonce.TimeRangeId].(*bc.TimeRange)
+                               tr.ExtHash = newHash(1)
+                       },
+               },
+               {
+                       desc: "mismatched output source / mux dest position",
+                       f: func() {
+                               tx.Entries[*tx.ResultIds[0]].(*bc.Output).Source.Position = 1
+                       },
+                       err: errMismatchedPosition,
+               },
+               {
+                       desc: "mismatched output source and mux dest",
+                       f: func() {
+                               // For this test, it's necessary to construct a mostly
+                               // identical second transaction in order to get a similar but
+                               // not equal output entry for the mux to falsely point
+                               // to. That entry must be added to the first tx's Entries map.
+                               fixture.txOutputs[0].ReferenceData = []byte{1}
+                               fixture2 := sample(t, fixture)
+                               tx2 := legacy.NewTx(*fixture2.tx).Tx
+                               out2ID := tx2.ResultIds[0]
+                               out2 := tx2.Entries[*out2ID].(*bc.Output)
+                               tx.Entries[*out2ID] = out2
+                               mux.WitnessDestinations[0].Ref = out2ID
+                       },
+                       err: errMismatchedReference,
+               },
+               {
+                       desc: "invalid mux destination position",
+                       f: func() {
+                               mux.WitnessDestinations[0].Position = 1
+                       },
+                       err: errPosition,
+               },
+               {
+                       desc: "mismatched mux dest value / output source value",
+                       f: func() {
+                               outID := tx.ResultIds[0]
+                               out := tx.Entries[*outID].(*bc.Output)
+                               mux.WitnessDestinations[0].Value = &bc.AssetAmount{
+                                       AssetId: out.Source.Value.AssetId,
+                                       Amount:  out.Source.Value.Amount + 1,
+                               }
+                               mux.Sources[0].Value.Amount++ // the mux must still balance
+                       },
+                       err: errMismatchedValue,
+               },
+               {
+                       desc: "output exthash nonempty",
+                       f: func() {
+                               tx.Entries[*tx.ResultIds[0]].(*bc.Output).ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "output exthash nonempty, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               tx.Entries[*tx.ResultIds[0]].(*bc.Output).ExtHash = newHash(1)
+                       },
+               },
+               {
+                       desc: "misordered tx time range",
+                       f: func() {
+                               tx.MinTimeMs = tx.MaxTimeMs + 1
+                       },
+                       err: errBadTimeRange,
+               },
+               {
+                       desc: "empty tx results",
+                       f: func() {
+                               tx.ResultIds = nil
+                       },
+                       err: errEmptyResults,
+               },
+               {
+                       desc: "empty tx results, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               tx.ResultIds = nil
+                       },
+               },
+               {
+                       desc: "tx header exthash nonempty",
+                       f: func() {
+                               tx.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "tx header exthash nonempty, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               tx.ExtHash = newHash(1)
+                       },
+               },
+               {
+                       desc: "wrong blockchain",
+                       f: func() {
+                               vs.blockchainID = *newHash(2)
+                       },
+                       err: errWrongBlockchain,
+               },
+               {
+                       desc: "issuance asset ID mismatch",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               iss.Value.AssetId = newAssetID(1)
+                       },
+                       err: errMismatchedAssetID,
+               },
+               {
+                       desc: "issuance program failure",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               iss.WitnessArguments[0] = []byte{}
+                       },
+                       err: vm.ErrFalseVMResult,
+               },
+               {
+                       desc: "issuance exthash nonempty",
+                       f: func() {
+                               iss := txIssuance(t, tx, 0)
+                               iss.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "issuance exthash nonempty, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               iss := txIssuance(t, tx, 0)
+                               iss.ExtHash = newHash(1)
+                       },
+               },
+               {
+                       desc: "spend control program failure",
+                       f: func() {
+                               spend := txSpend(t, tx, 1)
+                               spend.WitnessArguments[0] = []byte{}
+                       },
+                       err: vm.ErrFalseVMResult,
+               },
+               {
+                       desc: "mismatched spent source/witness value",
+                       f: func() {
+                               spend := txSpend(t, tx, 1)
+                               spentOutput := tx.Entries[*spend.SpentOutputId].(*bc.Output)
+                               spentOutput.Source.Value = &bc.AssetAmount{
+                                       AssetId: spend.WitnessDestination.Value.AssetId,
+                                       Amount:  spend.WitnessDestination.Value.Amount + 1,
+                               }
+                       },
+                       err: errMismatchedValue,
+               },
+               {
+                       desc: "spend exthash nonempty",
+                       f: func() {
+                               spend := txSpend(t, tx, 1)
+                               spend.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+               {
+                       desc: "spend exthash nonempty, but that's OK",
+                       f: func() {
+                               tx.Version = 2
+                               spend := txSpend(t, tx, 1)
+                               spend.ExtHash = newHash(1)
+                       },
+               },
+       }
+
+       // Each subtest rebuilds a fresh fixture, tx, and validation state
+       // so one case's mutations cannot leak into the next.
+       for _, c := range cases {
+               t.Run(c.desc, func(t *testing.T) {
+                       fixture = sample(t, nil)
+                       tx = legacy.NewTx(*fixture.tx).Tx
+                       vs = &validationState{
+                               blockchainID: fixture.initialBlockID,
+                               tx:           tx,
+                               entryID:      tx.ID,
+                               cache:        make(map[bc.Hash]error),
+                       }
+                       // Locate the tx's mux via the first result output's source.
+                       out := tx.Entries[*tx.ResultIds[0]].(*bc.Output)
+                       muxID := out.Source.Ref
+                       mux = tx.Entries[*muxID].(*bc.Mux)
+
+                       if c.f != nil {
+                               c.f()
+                       }
+                       err := checkValid(vs, tx.TxHeader)
+                       if rootErr(err) != c.err {
+                               t.Errorf("got error %s, want %s; validationState is:\n%s", err, c.err, spew.Sdump(vs))
+                       }
+               })
+       }
+}
+
+// TestNoncelessIssuance checks that an issuance whose nonce has been
+// removed fails validation with bc.ErrMissingEntry (the issuance's
+// anchor entry cannot be resolved).
+func TestNoncelessIssuance(t *testing.T) {
+       tx := bctest.NewIssuanceTx(t, bc.EmptyStringHash, func(tx *legacy.Tx) {
+               // Remove the issuance nonce.
+               tx.Inputs[0].TypedInput.(*legacy.IssuanceInput).Nonce = nil
+       })
+
+       err := ValidateTx(legacy.MapTx(&tx.TxData), bc.EmptyStringHash)
+       if errors.Root(err) != bc.ErrMissingEntry {
+               t.Fatalf("got %s, want %s", err, bc.ErrMissingEntry)
+       }
+}
+
+// TestBlockHeaderValid exercises checkValidBlockHeader: a baseline
+// header is valid, a higher version is valid, and a nonempty ExtHash
+// is rejected with errNonemptyExtHash.
+func TestBlockHeaderValid(t *testing.T) {
+       base := bc.NewBlockHeader(1, 1, &bc.Hash{}, 1, &bc.Hash{}, &bc.Hash{}, nil)
+       baseBytes, _ := proto.Marshal(base)
+
+       var bh bc.BlockHeader
+
+       cases := []struct {
+               f   func()
+               err error
+       }{
+               {},
+               {
+                       f: func() {
+                               bh.Version = 2
+                       },
+               },
+               {
+                       f: func() {
+                               bh.ExtHash = newHash(1)
+                       },
+                       err: errNonemptyExtHash,
+               },
+       }
+
+       for i, c := range cases {
+               t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+                       // Reset bh to the baseline header before each mutation.
+                       // NOTE(review): Unmarshal's error is ignored here; baseBytes
+                       // was produced by Marshal just above, but checking it would
+                       // be safer.
+                       proto.Unmarshal(baseBytes, &bh)
+                       if c.f != nil {
+                               c.f()
+                       }
+                       err := checkValidBlockHeader(&bh)
+                       if err != c.err {
+                               t.Errorf("got error %s, want %s; bh is:\n%s", err, c.err, spew.Sdump(bh))
+                       }
+               })
+       }
+}
+
+// A txFixture is returned by sample (below) to produce a sample
+// transaction, which takes a separate, optional _input_ txFixture to
+// affect the transaction that's built. The components of the
+// transaction are the fields of txFixture.
+type txFixture struct {
+       initialBlockID       bc.Hash          // blockchain ID the tx is validated against
+       issuanceProg         bc.Program       // issuance program for the sample asset
+       issuanceArgs         [][]byte         // witness arguments satisfying issuanceProg
+       assetDef             []byte           // asset definition used to derive assetID
+       assetID              bc.AssetID       // derived from issuanceProg, initialBlockID, assetDef
+       txVersion            uint64
+       txInputs             []*legacy.TxInput
+       txOutputs            []*legacy.TxOutput
+       txMinTime, txMaxTime uint64
+       txRefData            []byte
+       tx                   *legacy.TxData   // the assembled transaction
+}
+
+// Produces a sample transaction in a txFixture object (see above). A
+// separate input txFixture can be used to alter the transaction
+// that's created.
+//
+// The output of this function can be used as the input to a
+// subsequent call to make iterative refinements to a test object.
+//
+// The default transaction produced is valid and has three inputs:
+//  - an issuance of 10 units
+//  - a spend of 20 units
+//  - a spend of 40 units
+// and two outputs, one of 25 units and one of 45 units.
+// All amounts are denominated in the same asset.
+//
+// The issuance program for the asset requires two numbers as
+// arguments that add up to 5. The prevout control programs require
+// two numbers each, adding to 9 and 13, respectively.
+//
+// The min and max times for the transaction are now +/- one minute.
+func sample(tb testing.TB, in *txFixture) *txFixture {
+       var result txFixture
+       if in != nil {
+               result = *in
+       }
+
+       // Each zero-valued field below is filled with its default; fields
+       // already set in the input fixture are left alone.
+       if result.initialBlockID.IsZero() {
+               result.initialBlockID = *newHash(1)
+       }
+       if testutil.DeepEqual(result.issuanceProg, bc.Program{}) {
+               prog, err := vm.Assemble("ADD 5 NUMEQUAL")
+               if err != nil {
+                       tb.Fatal(err)
+               }
+               result.issuanceProg = bc.Program{VmVersion: 1, Code: prog}
+       }
+       if len(result.issuanceArgs) == 0 {
+               result.issuanceArgs = [][]byte{[]byte{2}, []byte{3}}
+       }
+       if len(result.assetDef) == 0 {
+               result.assetDef = []byte{2}
+       }
+       if result.assetID.IsZero() {
+               refdatahash := hashData(result.assetDef)
+               result.assetID = bc.ComputeAssetID(result.issuanceProg.Code, &result.initialBlockID, result.issuanceProg.VmVersion, &refdatahash)
+       }
+
+       if result.txVersion == 0 {
+               result.txVersion = 1
+       }
+       if len(result.txInputs) == 0 {
+               cp1, err := vm.Assemble("ADD 9 NUMEQUAL")
+               if err != nil {
+                       tb.Fatal(err)
+               }
+               args1 := [][]byte{[]byte{4}, []byte{5}}
+
+               cp2, err := vm.Assemble("ADD 13 NUMEQUAL")
+               if err != nil {
+                       tb.Fatal(err)
+               }
+               args2 := [][]byte{[]byte{6}, []byte{7}}
+
+               result.txInputs = []*legacy.TxInput{
+                       legacy.NewIssuanceInput([]byte{3}, 10, []byte{4}, result.initialBlockID, result.issuanceProg.Code, result.issuanceArgs, result.assetDef),
+                       legacy.NewSpendInput(args1, *newHash(5), result.assetID, 20, 0, cp1, *newHash(6), []byte{7}),
+                       legacy.NewSpendInput(args2, *newHash(8), result.assetID, 40, 0, cp2, *newHash(9), []byte{10}),
+               }
+       }
+       if len(result.txOutputs) == 0 {
+               cp1, err := vm.Assemble("ADD 17 NUMEQUAL")
+               if err != nil {
+                       tb.Fatal(err)
+               }
+               cp2, err := vm.Assemble("ADD 21 NUMEQUAL")
+               if err != nil {
+                       tb.Fatal(err)
+               }
+
+               result.txOutputs = []*legacy.TxOutput{
+                       legacy.NewTxOutput(result.assetID, 25, cp1, []byte{11}),
+                       legacy.NewTxOutput(result.assetID, 45, cp2, []byte{12}),
+               }
+       }
+       if result.txMinTime == 0 {
+               result.txMinTime = bc.Millis(time.Now().Add(-time.Minute))
+       }
+       if result.txMaxTime == 0 {
+               result.txMaxTime = bc.Millis(time.Now().Add(time.Minute))
+       }
+       if len(result.txRefData) == 0 {
+               result.txRefData = []byte{13}
+       }
+
+       result.tx = &legacy.TxData{
+               Version:       result.txVersion,
+               Inputs:        result.txInputs,
+               Outputs:       result.txOutputs,
+               MinTime:       result.txMinTime,
+               MaxTime:       result.txMaxTime,
+               ReferenceData: result.txRefData,
+       }
+
+       return &result
+}
+
+// Like errors.Root, but also unwraps vm.Error objects.
+// It loops until the unwrapped value is neither a wrapped chain/errors
+// error nor a vm.Error, then returns that innermost error.
+func rootErr(e error) error {
+       for {
+               e = errors.Root(e)
+               if e2, ok := e.(vm.Error); ok {
+                       e = e2.Err
+                       continue
+               }
+               return e
+       }
+}
+
+// hashData returns the SHA3-256 hash of data as a bc.Hash, using the
+// shared sha3pool hasher.
+func hashData(data []byte) bc.Hash {
+       var b32 [32]byte
+       sha3pool.Sum256(b32[:], data)
+       return bc.NewHash(b32)
+}
+
+// newHash returns a pointer to a bc.Hash whose first byte is n and
+// whose remaining bytes are zero — a convenient distinct test hash.
+func newHash(n byte) *bc.Hash {
+       h := bc.NewHash([32]byte{n})
+       return &h
+}
+
+// newAssetID returns a pointer to a bc.AssetID whose first byte is n
+// and whose remaining bytes are zero — a convenient distinct test ID.
+func newAssetID(n byte) *bc.AssetID {
+       a := bc.NewAssetID([32]byte{n})
+       return &a
+}
+
+// txIssuance returns the issuance entry behind tx's input at the given
+// index, failing the test if that input is not an issuance.
+func txIssuance(t *testing.T, tx *bc.Tx, index int) *bc.Issuance {
+       id := tx.InputIDs[index]
+       res, err := tx.Issuance(id)
+       if err != nil {
+               t.Fatal(err)
+       }
+       return res
+}
+
+// txSpend returns the spend entry behind tx's input at the given
+// index, failing the test if that input is not a spend.
+func txSpend(t *testing.T, tx *bc.Tx, index int) *bc.Spend {
+       id := tx.InputIDs[index]
+       res, err := tx.Spend(id)
+       if err != nil {
+               t.Fatal(err)
+       }
+       return res
+}
diff --git a/protocol/validation/vmcontext.go b/protocol/validation/vmcontext.go
new file mode 100644 (file)
index 0000000..8b36112
--- /dev/null
@@ -0,0 +1,205 @@
+package validation
+
+import (
+       "bytes"
+
+       "chain/crypto/sha3pool"
+       "chain/errors"
+       "chain/protocol/bc"
+       "chain/protocol/vm"
+)
+
+// newBlockVMContext builds a VM-version-1 execution context for running
+// prog with args against the given block, exposing the block's hash,
+// timestamp, and next consensus program to the VM.
+func newBlockVMContext(block *bc.Block, prog []byte, args [][]byte) *vm.Context {
+       blockHash := block.ID.Bytes()
+       return &vm.Context{
+               VMVersion: 1,
+               Code:      prog,
+               Arguments: args,
+
+               BlockHash:            &blockHash,
+               BlockTimeMS:          &block.TimestampMs,
+               NextConsensusProgram: &block.NextConsensusProgram,
+       }
+}
+
+// NewTxVMContext builds a vm.Context for executing prog with args on
+// behalf of the given transaction entry. Entry-kind-specific context
+// fields (asset ID, amount, entry data, destination position, anchor
+// ID, spent-output ID) are populated only for the entry kinds that
+// carry them; the rest remain nil. The transaction signature hash is
+// computed lazily, on first use, from the entry ID and tx ID.
+func NewTxVMContext(tx *bc.Tx, entry bc.Entry, prog *bc.Program, args [][]byte) *vm.Context {
+       var (
+               numResults = uint64(len(tx.ResultIds))
+               txData     = tx.Data.Bytes()
+               entryID    = bc.EntryID(entry) // TODO(bobg): pass this in, don't recompute it
+
+               assetID       *[]byte
+               amount        *uint64
+               entryData     *[]byte
+               destPos       *uint64
+               anchorID      *[]byte
+               spentOutputID *[]byte
+       )
+
+       switch e := entry.(type) {
+       case *bc.Nonce:
+               // A nonce exposes asset/amount only when it anchors an issuance.
+               anchored := tx.Entries[*e.WitnessAnchoredId]
+               if iss, ok := anchored.(*bc.Issuance); ok {
+                       a1 := iss.Value.AssetId.Bytes()
+                       assetID = &a1
+                       amount = &iss.Value.Amount
+               }
+
+       case *bc.Issuance:
+               a1 := e.Value.AssetId.Bytes()
+               assetID = &a1
+               amount = &e.Value.Amount
+               destPos = &e.WitnessDestination.Position
+               d := e.Data.Bytes()
+               entryData = &d
+               a2 := e.AnchorId.Bytes()
+               anchorID = &a2
+
+       case *bc.Spend:
+               // Asset and amount come from the output being spent.
+               spentOutput := tx.Entries[*e.SpentOutputId].(*bc.Output)
+               a1 := spentOutput.Source.Value.AssetId.Bytes()
+               assetID = &a1
+               amount = &spentOutput.Source.Value.Amount
+               destPos = &e.WitnessDestination.Position
+               d := e.Data.Bytes()
+               entryData = &d
+               s := e.SpentOutputId.Bytes()
+               spentOutputID = &s
+
+       case *bc.Output:
+               d := e.Data.Bytes()
+               entryData = &d
+
+       case *bc.Retirement:
+               d := e.Data.Bytes()
+               entryData = &d
+       }
+
+       // txSigHashFn computes SHA3(entryID || txID) once and caches it.
+       var txSigHash *[]byte
+       txSigHashFn := func() []byte {
+               if txSigHash == nil {
+                       hasher := sha3pool.Get256()
+                       defer sha3pool.Put256(hasher)
+
+                       entryID.WriteTo(hasher)
+                       tx.ID.WriteTo(hasher)
+
+                       var hash bc.Hash
+                       hash.ReadFrom(hasher)
+                       hashBytes := hash.Bytes()
+                       txSigHash = &hashBytes
+               }
+               return *txSigHash
+       }
+
+       ec := &entryContext{
+               entry:   entry,
+               entries: tx.Entries,
+       }
+
+       result := &vm.Context{
+               VMVersion: prog.VmVersion,
+               Code:      prog.Code,
+               Arguments: args,
+
+               EntryID: entryID.Bytes(),
+
+               TxVersion: &tx.Version,
+
+               TxSigHash:     txSigHashFn,
+               NumResults:    &numResults,
+               AssetID:       assetID,
+               Amount:        amount,
+               MinTimeMS:     &tx.MinTimeMs,
+               MaxTimeMS:     &tx.MaxTimeMs,
+               EntryData:     entryData,
+               TxData:        &txData,
+               DestPos:       destPos,
+               AnchorID:      anchorID,
+               SpentOutputID: spentOutputID,
+               CheckOutput:   ec.checkOutput,
+       }
+
+       return result
+}
+
+// entryContext pairs one transaction entry with the full entry map it
+// belongs to, so checkOutput can follow references between entries.
+type entryContext struct {
+       entry   bc.Entry            // the entry the VM is executing on behalf of
+       entries map[bc.Hash]bc.Entry // all entries of the transaction, by ID
+}
+
+// checkOutput implements the VM's CHECKOUTPUT semantics for ec.entry.
+// It resolves the destination at the given index — directly for a mux,
+// or via the witness destination for an issuance or spend (following
+// through an intermediate mux if present) — and reports whether that
+// destination matches the supplied program (vmVersion, code), asset ID,
+// amount, and (if data is nonempty) data hash. It returns vm.ErrContext
+// when the entry kind does not support the check.
+func (ec *entryContext) checkOutput(index uint64, data []byte, amount uint64, assetID []byte, vmVersion uint64, code []byte, expansion bool) (bool, error) {
+       checkEntry := func(e bc.Entry) (bool, error) {
+               // check compares a candidate program/value/data-hash triple
+               // against the arguments; an empty data argument matches any hash.
+               check := func(prog *bc.Program, value *bc.AssetAmount, dataHash *bc.Hash) bool {
+                       return (prog.VmVersion == vmVersion &&
+                               bytes.Equal(prog.Code, code) &&
+                               bytes.Equal(value.AssetId.Bytes(), assetID) &&
+                               value.Amount == amount &&
+                               (len(data) == 0 || bytes.Equal(dataHash.Bytes(), data)))
+               }
+
+               switch e := e.(type) {
+               case *bc.Output:
+                       return check(e.ControlProgram, e.Source.Value, e.Data), nil
+
+               case *bc.Retirement:
+                       var prog bc.Program
+                       if expansion {
+                               // The spec requires prog.Code to be the empty string only
+                               // when !expansion. When expansion is true, we prepopulate
+                               // prog.Code to give check() a freebie match.
+                               //
+                               // (The spec always requires prog.VmVersion to be zero.)
+                               prog.Code = code
+                       }
+                       return check(&prog, e.Source.Value, e.Data), nil
+               }
+
+               return false, vm.ErrContext
+       }
+
+       // checkMux resolves the mux's destination at index and checks it.
+       checkMux := func(m *bc.Mux) (bool, error) {
+               if index >= uint64(len(m.WitnessDestinations)) {
+                       return false, errors.Wrapf(vm.ErrBadValue, "index %d >= %d", index, len(m.WitnessDestinations))
+               }
+               eID := m.WitnessDestinations[index].Ref
+               e, ok := ec.entries[*eID]
+               if !ok {
+                       return false, errors.Wrapf(bc.ErrMissingEntry, "entry for mux destination %d, id %x, not found", index, eID.Bytes())
+               }
+               return checkEntry(e)
+       }
+
+       switch e := ec.entry.(type) {
+       case *bc.Mux:
+               return checkMux(e)
+
+       case *bc.Issuance:
+               d, ok := ec.entries[*e.WitnessDestination.Ref]
+               if !ok {
+                       return false, errors.Wrapf(bc.ErrMissingEntry, "entry for issuance destination %x not found", e.WitnessDestination.Ref.Bytes())
+               }
+               if m, ok := d.(*bc.Mux); ok {
+                       return checkMux(m)
+               }
+               // Without a mux there is only one destination, so index must be 0.
+               if index != 0 {
+                       return false, errors.Wrapf(vm.ErrBadValue, "index %d >= 1", index)
+               }
+               return checkEntry(d)
+
+       case *bc.Spend:
+               d, ok := ec.entries[*e.WitnessDestination.Ref]
+               if !ok {
+                       return false, errors.Wrapf(bc.ErrMissingEntry, "entry for spend destination %x not found", e.WitnessDestination.Ref.Bytes())
+               }
+               if m, ok := d.(*bc.Mux); ok {
+                       return checkMux(m)
+               }
+               // Without a mux there is only one destination, so index must be 0.
+               if index != 0 {
+                       return false, errors.Wrapf(vm.ErrBadValue, "index %d >= 1", index)
+               }
+               return checkEntry(d)
+       }
+
+       return false, vm.ErrContext
+}
diff --git a/protocol/validation/vmcontext_test.go b/protocol/validation/vmcontext_test.go
new file mode 100644 (file)
index 0000000..f5f8389
--- /dev/null
@@ -0,0 +1,112 @@
+package validation
+
+import (
+       "encoding/hex"
+       "fmt"
+       "testing"
+
+       "chain/errors"
+       "chain/protocol/bc"
+       "chain/protocol/bc/legacy"
+       "chain/protocol/vm"
+)
+
// TestCheckOutput exercises entryContext.checkOutput against a fixed
// transaction with five outputs, verifying both positive and negative
// matches as well as the out-of-range-index error.
func TestCheckOutput(t *testing.T) {
	// Build a transaction whose outputs differ from each other in
	// program, asset ID, amount, and reference data, so each checkOutput
	// case below can match or mismatch on exactly one dimension.
	tx := legacy.NewTx(legacy.TxData{
		ReferenceData: []byte("txref"),
		Inputs: []*legacy.TxInput{
			legacy.NewSpendInput(nil, bc.Hash{}, bc.NewAssetID([32]byte{1}), 5, 1, []byte("spendprog"), bc.Hash{}, []byte("ref")),
			legacy.NewIssuanceInput(nil, 6, nil, bc.Hash{}, []byte("issueprog"), nil, nil),
		},
		Outputs: []*legacy.TxOutput{
			legacy.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("wrongprog"), nil),
			legacy.NewTxOutput(bc.NewAssetID([32]byte{3}), 8, []byte("controlprog"), nil),
			legacy.NewTxOutput(bc.NewAssetID([32]byte{2}), 8, []byte("controlprog"), nil),
			legacy.NewTxOutput(bc.NewAssetID([32]byte{2}), 7, []byte("controlprog"), nil),
			legacy.NewTxOutput(bc.NewAssetID([32]byte{2}), 7, []byte("controlprog"), []byte("outref")),
		},
		MinTime: 0,
		MaxTime: 20,
	})

	// Run checks from the perspective of the first (spend) input's entry.
	txCtx := &entryContext{
		entry:   tx.Tx.Entries[tx.Tx.InputIDs[0]],
		entries: tx.Tx.Entries,
	}

	cases := []struct {
		// args to CheckOutput
		index     uint64
		data      []byte
		amount    uint64
		assetID   []byte
		vmVersion uint64
		code      []byte

		wantErr error
		wantOk  bool
	}{
		{
			// Output 4 matches on every field (the hex value is
			// presumably the hash of its "outref" data — confirm
			// against the data-hashing rules).
			index:     4,
			data:      mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
			amount:    7,
			assetID:   append([]byte{2}, make([]byte, 31)...),
			vmVersion: 1,
			code:      []byte("controlprog"),
			wantOk:    true,
		},
		{
			// Output 3 has nil reference data, so the data hash mismatches.
			index:     3,
			data:      mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
			amount:    7,
			assetID:   append([]byte{2}, make([]byte, 31)...),
			vmVersion: 1,
			code:      []byte("controlprog"),
			wantOk:    false,
		},
		{
			// No output carries this asset/program combination.
			index:     0,
			data:      []byte{},
			amount:    1,
			assetID:   append([]byte{9}, make([]byte, 31)...),
			vmVersion: 1,
			code:      []byte("missingprog"),
			wantOk:    false,
		},
		{
			// Index 5 is past the last output and must error rather
			// than report a simple non-match.
			index:     5,
			data:      mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
			amount:    7,
			assetID:   append([]byte{2}, make([]byte, 31)...),
			vmVersion: 1,
			code:      []byte("controlprog"),
			wantErr:   vm.ErrBadValue,
		},
	}

	for i, test := range cases {
		t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
			gotOk, err := txCtx.checkOutput(test.index, test.data, test.amount, test.assetID, test.vmVersion, test.code, false)
			// Compare root causes so wrapped errors still match.
			if g := errors.Root(err); g != test.wantErr {
				t.Errorf("checkOutput(%v, %v, %v, %x, %v, %x) err = %v, want %v",
					test.index, test.data, test.amount, test.assetID, test.vmVersion, test.code,
					g, test.wantErr)
				return
			}
			if gotOk != test.wantOk {
				t.Errorf("checkOutput(%v, %v, %v, %x, %v, %x) ok = %v, want %v",
					test.index, test.data, test.amount, test.assetID, test.vmVersion, test.code,
					gotOk, test.wantOk)
			}

		})
	}
}
+
// mustDecodeHex decodes the hex string h, panicking on malformed
// input. Intended for hard-coded test fixtures only.
func mustDecodeHex(h string) []byte {
	decoded, err := hex.DecodeString(h)
	if err != nil {
		panic(err)
	}
	return decoded
}
diff --git a/protocol/vm/assemble.go b/protocol/vm/assemble.go
new file mode 100644 (file)
index 0000000..8684c05
--- /dev/null
@@ -0,0 +1,228 @@
+package vm
+
+import (
+       "bufio"
+       "encoding/binary"
+       "encoding/hex"
+       "fmt"
+       "math"
+       "strconv"
+       "strings"
+       "unicode"
+
+       "chain/errors"
+)
+
+// Assemble converts a string like "2 3 ADD 5 NUMEQUAL" into 0x525393559c.
+// The input should not include PUSHDATA (or OP_<num>) ops; those will
+// be inferred.
+// Input may include jump-target labels of the form $foo, which can
+// then be used as JUMP:$foo or JUMPIF:$foo.
+func Assemble(s string) (res []byte, err error) {
+       // maps labels to the location each refers to
+       locations := make(map[string]uint32)
+
+       // maps unresolved uses of labels to the locations that need to be filled in
+       unresolved := make(map[string][]int)
+
+       handleJump := func(addrStr string, opcode Op) error {
+               res = append(res, byte(opcode))
+               l := len(res)
+
+               var fourBytes [4]byte
+               res = append(res, fourBytes[:]...)
+
+               if strings.HasPrefix(addrStr, "$") {
+                       unresolved[addrStr] = append(unresolved[addrStr], l)
+                       return nil
+               }
+
+               address, err := strconv.ParseUint(addrStr, 10, 32)
+               if err != nil {
+                       return err
+               }
+               binary.LittleEndian.PutUint32(res[l:], uint32(address))
+               return nil
+       }
+
+       scanner := bufio.NewScanner(strings.NewReader(s))
+       scanner.Split(split)
+       for scanner.Scan() {
+               token := scanner.Text()
+               if info, ok := opsByName[token]; ok {
+                       if strings.HasPrefix(token, "PUSHDATA") || strings.HasPrefix(token, "JUMP") {
+                               return nil, errors.Wrap(ErrToken, token)
+                       }
+                       res = append(res, byte(info.op))
+               } else if strings.HasPrefix(token, "JUMP:") {
+                       // TODO (Dan): add IF/ELSE/ENDIF and BEGIN/WHILE/REPEAT
+                       err = handleJump(strings.TrimPrefix(token, "JUMP:"), OP_JUMP)
+                       if err != nil {
+                               return nil, err
+                       }
+               } else if strings.HasPrefix(token, "JUMPIF:") {
+                       err = handleJump(strings.TrimPrefix(token, "JUMPIF:"), OP_JUMPIF)
+                       if err != nil {
+                               return nil, err
+                       }
+               } else if strings.HasPrefix(token, "$") {
+                       if _, seen := locations[token]; seen {
+                               return nil, fmt.Errorf("label %s redefined", token)
+                       }
+                       if len(res) > math.MaxInt32 {
+                               return nil, fmt.Errorf("program too long")
+                       }
+                       locations[token] = uint32(len(res))
+               } else if strings.HasPrefix(token, "0x") {
+                       bytes, err := hex.DecodeString(strings.TrimPrefix(token, "0x"))
+                       if err != nil {
+                               return nil, err
+                       }
+                       res = append(res, PushdataBytes(bytes)...)
+               } else if len(token) >= 2 && token[0] == '\'' && token[len(token)-1] == '\'' {
+                       bytes := make([]byte, 0, len(token)-2)
+                       var b int
+                       for i := 1; i < len(token)-1; i++ {
+                               if token[i] == '\\' {
+                                       i++
+                               }
+                               bytes = append(bytes, token[i])
+                               b++
+                       }
+                       res = append(res, PushdataBytes(bytes)...)
+               } else if num, err := strconv.ParseInt(token, 10, 64); err == nil {
+                       res = append(res, PushdataInt64(num)...)
+               } else {
+                       return nil, errors.Wrap(ErrToken, token)
+               }
+       }
+       err = scanner.Err()
+       if err != nil {
+               return nil, err
+       }
+
+       for label, uses := range unresolved {
+               location, ok := locations[label]
+               if !ok {
+                       return nil, fmt.Errorf("undefined label %s", label)
+               }
+               for _, use := range uses {
+                       binary.LittleEndian.PutUint32(res[use:], location)
+               }
+       }
+
+       return res, nil
+}
+
+func Disassemble(prog []byte) (string, error) {
+       var (
+               insts []Instruction
+
+               // maps program locations (used as jump targets) to a label for each
+               labels = make(map[uint32]string)
+       )
+
+       // first pass: look for jumps
+       for i := uint32(0); i < uint32(len(prog)); {
+               inst, err := ParseOp(prog, i)
+               if err != nil {
+                       return "", err
+               }
+               switch inst.Op {
+               case OP_JUMP, OP_JUMPIF:
+                       addr := binary.LittleEndian.Uint32(inst.Data)
+                       if _, ok := labels[addr]; !ok {
+                               labelNum := len(labels)
+                               label := words[labelNum%len(words)]
+                               if labelNum >= len(words) {
+                                       label += fmt.Sprintf("%d", labelNum/len(words)+1)
+                               }
+                               labels[addr] = label
+                       }
+               }
+               insts = append(insts, inst)
+               i += inst.Len
+       }
+
+       var (
+               loc  uint32
+               strs []string
+       )
+
+       for _, inst := range insts {
+               if label, ok := labels[loc]; ok {
+                       strs = append(strs, "$"+label)
+               }
+
+               var str string
+               switch inst.Op {
+               case OP_JUMP, OP_JUMPIF:
+                       addr := binary.LittleEndian.Uint32(inst.Data)
+                       str = fmt.Sprintf("%s:$%s", inst.Op.String(), labels[addr])
+               default:
+                       if len(inst.Data) > 0 {
+                               str = fmt.Sprintf("0x%x", inst.Data)
+                       } else {
+                               str = inst.Op.String()
+                       }
+               }
+               strs = append(strs, str)
+
+               loc += inst.Len
+       }
+
+       if label, ok := labels[loc]; ok {
+               strs = append(strs, "$"+label)
+       }
+
+       return strings.Join(strs, " "), nil
+}
+
+// split is a bufio.SplitFunc for scanning the input to Compile.
+// It starts like bufio.ScanWords but adjusts the return value to
+// account for quoted strings.
+func split(inp []byte, atEOF bool) (advance int, token []byte, err error) {
+       advance, token, err = bufio.ScanWords(inp, atEOF)
+       if err != nil {
+               return
+       }
+       if len(token) > 1 && token[0] != '\'' {
+               return
+       }
+       var start int
+       for ; start < len(inp); start++ {
+               if !unicode.IsSpace(rune(inp[start])) {
+                       break
+               }
+       }
+       if start == len(inp) || inp[start] != '\'' {
+               return
+       }
+       var escape bool
+       for i := start + 1; i < len(inp); i++ {
+               if escape {
+                       escape = false
+               } else {
+                       switch inp[i] {
+                       case '\'':
+                               advance = i + 1
+                               token = inp[start:advance]
+                               return
+                       case '\\':
+                               escape = true
+                       }
+               }
+       }
+       // Reached the end of the input with no closing quote.
+       if atEOF {
+               return 0, nil, ErrToken
+       }
+       return 0, nil, nil
+}
+
// words supplies the human-friendly names Disassemble uses for
// generated jump-target labels ($alpha, $bravo, ...). Order matters:
// labels are assigned by indexing into this slice, and once it is
// exhausted a numeric suffix is appended.
var words = []string{
	"alpha", "bravo", "charlie", "delta", "echo", "foxtrot", "golf", "hotel",
	"india", "juliet", "kilo", "lima", "mike", "november", "oscar", "papa",
	"quebec", "romeo", "sierra", "tango", "uniform", "victor", "whisky", "xray",
	"yankee", "zulu",
}
diff --git a/protocol/vm/assemble_test.go b/protocol/vm/assemble_test.go
new file mode 100644 (file)
index 0000000..8ca0864
--- /dev/null
@@ -0,0 +1,82 @@
+package vm
+
+import (
+       "bytes"
+       "encoding/hex"
+       "testing"
+
+       "chain/errors"
+)
+
// TestAssemble checks that Assemble turns mnemonic source into the
// expected bytecode — covering inferred pushes for integers, 0x hex
// literals, and quoted strings (with backslash escapes) — plus the
// error cases: odd-length hex, unknown tokens, and unterminated quotes.
func TestAssemble(t *testing.T) {
	cases := []struct {
		plain   string
		want    []byte
		wantErr error
	}{
		{"2 3 ADD 5 NUMEQUAL", mustDecodeHex("525393559c"), nil},
		{"0x02 3 ADD 5 NUMEQUAL", mustDecodeHex("01025393559c"), nil},
		{"19 14 SUB 5 NUMEQUAL", mustDecodeHex("01135e94559c"), nil},
		{"'Hello' 'WORLD' CAT 'HELLOWORLD' EQUAL", mustDecodeHex("0548656c6c6f05574f524c447e0a48454c4c4f574f524c4487"), nil},
		{`'H\'E' 'W' CAT 'H\'EW' EQUAL`, mustDecodeHex("0348274501577e044827455787"), nil},
		{`'HELLO '  'WORLD' CAT 'HELLO WORLD' EQUAL`, mustDecodeHex("0648454c4c4f2005574f524c447e0b48454c4c4f20574f524c4487"), nil},
		{`0x1`, nil, hex.ErrLength},
		{`BADTOKEN`, nil, ErrToken},
		{`'Unterminated quote`, nil, ErrToken},
	}

	for _, c := range cases {
		got, gotErr := Assemble(c.plain)

		// Compare root causes so wrapped errors still match.
		if errors.Root(gotErr) != c.wantErr {
			t.Errorf("Compile(%s) err = %v want %v", c.plain, errors.Root(gotErr), c.wantErr)
			continue
		}

		if c.wantErr != nil {
			continue
		}

		if !bytes.Equal(got, c.want) {
			t.Errorf("Compile(%s) = %x want %x", c.plain, got, c.want)
		}
	}
}
+
// TestDisassemble checks that Disassemble renders bytecode as mnemonic
// text: pushdata operands as 0x hex, jump targets as generated $labels,
// and unknown opcodes via their NOPxNN fallback name.
func TestDisassemble(t *testing.T) {
	cases := []struct {
		raw     []byte
		want    string
		wantErr error
	}{
		{mustDecodeHex("525393559c"), "0x02 0x03 ADD 0x05 NUMEQUAL", nil},
		{mustDecodeHex("01135e94559c"), "0x13 0x0e SUB 0x05 NUMEQUAL", nil},
		{mustDecodeHex("6300000000"), "$alpha JUMP:$alpha", nil},
		{[]byte{0xff}, "NOPxff", nil},
	}

	for _, c := range cases {
		got, gotErr := Disassemble(c.raw)

		// Compare root causes so wrapped errors still match.
		if errors.Root(gotErr) != c.wantErr {
			t.Errorf("Decompile(%x) err = %v want %v", c.raw, errors.Root(gotErr), c.wantErr)
			continue
		}

		if c.wantErr != nil {
			continue
		}

		if got != c.want {
			t.Errorf("Decompile(%x) = %s want %s", c.raw, got, c.want)
		}
	}
}
+
// mustDecodeHex decodes the hex string h, panicking on malformed
// input. Intended for hard-coded test fixtures only.
func mustDecodeHex(h string) []byte {
	decoded, err := hex.DecodeString(h)
	if err != nil {
		panic(err)
	}
	return decoded
}
diff --git a/protocol/vm/bitwise.go b/protocol/vm/bitwise.go
new file mode 100644 (file)
index 0000000..1d20dcf
--- /dev/null
@@ -0,0 +1,150 @@
+package vm
+
+import "bytes"
+
+func opInvert(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       top, err := vm.top()
+       if err != nil {
+               return err
+       }
+       err = vm.applyCost(int64(len(top)))
+       if err != nil {
+               return err
+       }
+       // Could rewrite top in place but maybe it's a shared data
+       // structure?
+       newTop := make([]byte, 0, len(top))
+       for _, b := range top {
+               newTop = append(newTop, ^b)
+       }
+       vm.dataStack[len(vm.dataStack)-1] = newTop
+       return nil
+}
+
+func opAnd(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       min, max := len(a), len(b)
+       if min > max {
+               min, max = max, min
+       }
+       err = vm.applyCost(int64(min))
+       if err != nil {
+               return err
+       }
+       res := make([]byte, 0, min)
+       for i := 0; i < min; i++ {
+               res = append(res, a[i]&b[i])
+       }
+       return vm.push(res, true)
+}
+
+func opOr(vm *virtualMachine) error {
+       return doOr(vm, false)
+}
+
+func opXor(vm *virtualMachine) error {
+       return doOr(vm, true)
+}
+
+func doOr(vm *virtualMachine, xor bool) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       min, max := len(a), len(b)
+       if min > max {
+               min, max = max, min
+       }
+       err = vm.applyCost(int64(max))
+       if err != nil {
+               return err
+       }
+       res := make([]byte, 0, max)
+       for i := 0; i < max; i++ {
+               var aByte, bByte, resByte byte
+               if i >= len(a) {
+                       aByte = 0
+               } else {
+                       aByte = a[i]
+               }
+               if i >= len(b) {
+                       bByte = 0
+               } else {
+                       bByte = b[i]
+               }
+               if xor {
+                       resByte = aByte ^ bByte
+               } else {
+                       resByte = aByte | bByte
+               }
+
+               res = append(res, resByte)
+       }
+       return vm.push(res, true)
+}
+
+func opEqual(vm *virtualMachine) error {
+       res, err := doEqual(vm)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(res, true)
+}
+
+func opEqualVerify(vm *virtualMachine) error {
+       res, err := doEqual(vm)
+       if err != nil {
+               return err
+       }
+       if res {
+               return nil
+       }
+       return ErrVerifyFailed
+}
+
+func doEqual(vm *virtualMachine) (bool, error) {
+       err := vm.applyCost(1)
+       if err != nil {
+               return false, err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return false, err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return false, err
+       }
+       min, max := len(a), len(b)
+       if min > max {
+               min, max = max, min
+       }
+       err = vm.applyCost(int64(min))
+       if err != nil {
+               return false, err
+       }
+       return bytes.Equal(a, b), nil
+}
diff --git a/protocol/vm/bitwise_test.go b/protocol/vm/bitwise_test.go
new file mode 100644 (file)
index 0000000..7516b54
--- /dev/null
@@ -0,0 +1,269 @@
+package vm
+
+import (
+       "testing"
+
+       "chain/testutil"
+)
+
// TestBitwiseOps runs each bitwise opcode against a prepared VM and
// checks the resulting data stack, remaining run limit, and deferred
// (refundable) cost. It also verifies that every bitwise op fails with
// ErrRunLimitExceeded when the run limit is exhausted.
func TestBitwiseOps(t *testing.T) {
	type testStruct struct {
		op      Op
		startVM *virtualMachine
		wantErr error
		wantVM  *virtualMachine
	}
	cases := []testStruct{{
		// INVERT flips every byte of the top item in place on the stack.
		op: OP_INVERT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{255}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			dataStack: [][]byte{{0}},
		},
	}, {
		op: OP_INVERT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{255, 0}},
		},
		wantVM: &virtualMachine{
			runLimit:  49997,
			dataStack: [][]byte{{0, 255}},
		},
	}, {
		// AND truncates to the shorter operand, regardless of which
		// operand is shorter.
		op: OP_AND,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -9,
			dataStack:    [][]byte{{0x80}},
		},
	}, {
		op: OP_AND,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80, 0xff}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -10,
			dataStack:    [][]byte{{0x80}},
		},
	}, {
		op: OP_AND,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0x80, 0xff}, {0xff}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -10,
			dataStack:    [][]byte{{0x80}},
		},
	}, {
		// OR and XOR zero-pad the shorter operand to the longer length.
		op: OP_OR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -9,
			dataStack:    [][]byte{{0xff}},
		},
	}, {
		op: OP_OR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80, 0x10}},
		},
		wantVM: &virtualMachine{
			runLimit:     49997,
			deferredCost: -9,
			dataStack:    [][]byte{{0xff, 0x10}},
		},
	}, {
		op: OP_OR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff, 0x10}, {0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49997,
			deferredCost: -9,
			dataStack:    [][]byte{{0xff, 0x10}},
		},
	}, {
		op: OP_XOR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -9,
			dataStack:    [][]byte{{0x7f}},
		},
	}, {
		op: OP_XOR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80, 0x10}},
		},
		wantVM: &virtualMachine{
			runLimit:     49997,
			deferredCost: -9,
			dataStack:    [][]byte{{0x7f, 0x10}},
		},
	}, {
		op: OP_XOR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff, 0x10}, {0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49997,
			deferredCost: -9,
			dataStack:    [][]byte{{0x7f, 0x10}},
		},
	}, {
		// EQUAL pushes {1} for equal operands and {} (false) otherwise,
		// including length mismatches in either direction.
		op: OP_EQUAL,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0xff}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -9,
			dataStack:    [][]byte{{1}},
		},
	}, {
		op: OP_EQUAL,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff, 0x10}, {0xff, 0x10}},
		},
		wantVM: &virtualMachine{
			runLimit:     49997,
			deferredCost: -11,
			dataStack:    [][]byte{{1}},
		},
	}, {
		op: OP_EQUAL,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -10,
			dataStack:    [][]byte{{}},
		},
	}, {
		op: OP_EQUAL,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0xff, 0x80}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -11,
			dataStack:    [][]byte{{}},
		},
	}, {
		op: OP_EQUAL,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff, 0x80}, {0xff}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -11,
			dataStack:    [][]byte{{}},
		},
	}, {
		// EQUALVERIFY leaves nothing on the stack on success and fails
		// with ErrVerifyFailed on any mismatch.
		op: OP_EQUALVERIFY,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0xff}},
		},
		wantVM: &virtualMachine{
			runLimit:     49998,
			deferredCost: -18,
			dataStack:    [][]byte{},
		},
	}, {
		op: OP_EQUALVERIFY,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff, 0x10}, {0xff, 0x10}},
		},
		wantVM: &virtualMachine{
			runLimit:     49997,
			deferredCost: -20,
			dataStack:    [][]byte{},
		},
	}, {
		op: OP_EQUALVERIFY,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0x80}},
		},
		wantErr: ErrVerifyFailed,
	}, {
		op: OP_EQUALVERIFY,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0xff, 0x80}},
		},
		wantErr: ErrVerifyFailed,
	}, {
		op: OP_EQUALVERIFY,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff, 0x80}, {0xff}},
		},
		wantErr: ErrVerifyFailed,
	}}

	// Every bitwise op must fail when the run limit is 0 or too small
	// to cover its cost.
	bitops := []Op{OP_INVERT, OP_AND, OP_OR, OP_XOR, OP_EQUAL, OP_EQUALVERIFY}
	for _, op := range bitops {
		cases = append(cases, testStruct{
			op: op,
			startVM: &virtualMachine{
				runLimit:  0,
				dataStack: [][]byte{{0xff}, {0xff}},
			},
			wantErr: ErrRunLimitExceeded,
		}, testStruct{
			op: op,
			startVM: &virtualMachine{
				runLimit:  1,
				dataStack: [][]byte{{0xff}, {0xff}},
			},
			wantErr: ErrRunLimitExceeded,
		})
	}

	for i, c := range cases {
		// Execute the op directly against the prepared VM; on success
		// the mutated startVM must match wantVM exactly.
		err := ops[c.op].fn(c.startVM)

		if err != c.wantErr {
			t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			continue
		}

		if !testutil.DeepEqual(c.startVM, c.wantVM) {
			t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, ops[c.op].name, c.startVM, c.wantVM)
		}
	}
}
diff --git a/protocol/vm/context.go b/protocol/vm/context.go
new file mode 100644 (file)
index 0000000..c56663e
--- /dev/null
@@ -0,0 +1,45 @@
+package vm
+
// Context contains the execution context for the virtual machine.
//
// Most fields are pointers and are not required to be present in all
// cases. A nil pointer means the value is absent in that context. If
// an opcode executes that requires an absent field to be present, it
// will return ErrContext.
//
// By convention, variables of this type have the name context, _not_
// ctx (to avoid confusion with context.Context).
type Context struct {
	VMVersion uint64
	Code      []byte   // the program under evaluation
	Arguments [][]byte // presumably the witness arguments seeding the data stack — confirm against VM setup

	EntryID []byte

	// TxVersion must be present when verifying transaction components
	// (such as spends and issuances).
	TxVersion *uint64

	// These fields must be present when verifying block headers.

	BlockHash            *[]byte
	BlockTimeMS          *uint64
	NextConsensusProgram *[]byte

	// Fields below this point are required by particular opcodes when
	// verifying transaction components.

	NumResults    *uint64
	AssetID       *[]byte
	Amount        *uint64
	MinTimeMS     *uint64
	MaxTimeMS     *uint64
	EntryData     *[]byte
	TxData        *[]byte
	DestPos       *uint64
	AnchorID      *[]byte
	SpentOutputID *[]byte

	// TxSigHash returns the transaction signature hash; supplied as a
	// closure, presumably so it can be computed lazily — confirm with callers.
	TxSigHash   func() []byte
	// CheckOutput reports whether the output at index matches the given
	// data, amount, asset ID, VM version and program (see the CHECKOUTPUT
	// opcode's use of this callback).
	CheckOutput func(index uint64, data []byte, amount uint64, assetID []byte, vmVersion uint64, code []byte, expansion bool) (bool, error)
}
diff --git a/protocol/vm/control.go b/protocol/vm/control.go
new file mode 100644 (file)
index 0000000..5bbb4be
--- /dev/null
@@ -0,0 +1,105 @@
+package vm
+
+import (
+       "encoding/binary"
+)
+
+func opVerify(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       p, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       if AsBool(p) {
+               return nil
+       }
+       return ErrVerifyFailed
+}
+
+func opFail(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       return ErrReturn
+}
+
+func opCheckPredicate(vm *virtualMachine) error {
+       err := vm.applyCost(256)
+       if err != nil {
+               return err
+       }
+       vm.deferCost(-256 + 64) // get most of that cost back at the end
+       limit, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       predicate, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if limit < 0 {
+               return ErrBadValue
+       }
+       l := int64(len(vm.dataStack))
+       if n > l {
+               return ErrDataStackUnderflow
+       }
+       if limit == 0 {
+               limit = vm.runLimit
+       }
+       err = vm.applyCost(limit)
+       if err != nil {
+               return err
+       }
+
+       childVM := virtualMachine{
+               context:   vm.context,
+               program:   predicate,
+               runLimit:  limit,
+               depth:     vm.depth + 1,
+               dataStack: append([][]byte{}, vm.dataStack[l-n:]...),
+       }
+       vm.dataStack = vm.dataStack[:l-n]
+
+       childErr := childVM.run()
+
+       vm.deferCost(-childVM.runLimit)
+       vm.deferCost(-stackCost(childVM.dataStack))
+       vm.deferCost(-stackCost(childVM.altStack))
+
+       return vm.pushBool(childErr == nil && !childVM.falseResult(), true)
+}
+
+func opJump(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       address := binary.LittleEndian.Uint32(vm.data)
+       vm.nextPC = address
+       return nil
+}
+
+func opJumpIf(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       p, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       if AsBool(p) {
+               address := binary.LittleEndian.Uint32(vm.data)
+               vm.nextPC = address
+       }
+       return nil
+}
diff --git a/protocol/vm/control_test.go b/protocol/vm/control_test.go
new file mode 100644 (file)
index 0000000..d6ef8ea
--- /dev/null
@@ -0,0 +1,220 @@
+package vm
+
+import (
+       "testing"
+
+       "chain/testutil"
+)
+
// TestControlOps drives the control-flow opcodes (JUMP, JUMPIF, VERIFY,
// FAIL, CHECKPREDICATE) through a table of starting VM states. For each
// case it invokes the opcode function directly on startVM and then
// checks either the returned error or the mutated VM against wantVM.
func TestControlOps(t *testing.T) {
	type testStruct struct {
		op      Op
		startVM *virtualMachine
		wantErr error
		wantVM  *virtualMachine
	}
	cases := []testStruct{{
		// JUMP to address 5 (little-endian in data).
		op: OP_JUMP,
		startVM: &virtualMachine{
			runLimit: 50000,
			pc:       0,
			nextPC:   1,
			data:     []byte{0x05, 0x00, 0x00, 0x00},
		},
		wantErr: nil,
		wantVM: &virtualMachine{
			runLimit: 49999,
			pc:       0,
			nextPC:   5,
			data:     []byte{0x05, 0x00, 0x00, 0x00},
		},
	}, {
		// JUMP to the maximum uint32 address.
		op: OP_JUMP,
		startVM: &virtualMachine{
			runLimit: 50000,
			pc:       0,
			nextPC:   1,
			data:     []byte{0xff, 0xff, 0xff, 0xff},
		},
		wantErr: nil,
		wantVM: &virtualMachine{
			runLimit: 49999,
			pc:       0,
			nextPC:   4294967295,
			data:     []byte{0xff, 0xff, 0xff, 0xff},
		},
	}, {
		// JUMPIF with a true condition on the stack: jump taken.
		op: OP_JUMPIF,
		startVM: &virtualMachine{
			runLimit:     50000,
			pc:           0,
			nextPC:       1,
			deferredCost: 0,
			dataStack:    [][]byte{{1}},
			data:         []byte{0x05, 0x00, 0x00, 0x00},
		},
		wantErr: nil,
		wantVM: &virtualMachine{
			runLimit:     49999,
			pc:           0,
			nextPC:       5,
			deferredCost: -9,
			dataStack:    [][]byte{},
			data:         []byte{0x05, 0x00, 0x00, 0x00},
		},
	}, {
		// JUMPIF with a false (empty) condition: falls through.
		op: OP_JUMPIF,
		startVM: &virtualMachine{
			runLimit:     50000,
			pc:           0,
			nextPC:       1,
			deferredCost: 0,
			dataStack:    [][]byte{{}},
			data:         []byte{0x05, 0x00, 0x00, 0x00},
		},
		wantErr: nil,
		wantVM: &virtualMachine{
			runLimit:     49999,
			pc:           0,
			nextPC:       1,
			deferredCost: -8,
			dataStack:    [][]byte{},
			data:         []byte{0x05, 0x00, 0x00, 0x00},
		},
	}, {
		// VERIFY with a true value succeeds and consumes it.
		op: OP_VERIFY,
		startVM: &virtualMachine{
			pc:           0,
			runLimit:     50000,
			deferredCost: 0,
			dataStack:    [][]byte{{1}},
		},
		wantErr: nil,
		wantVM: &virtualMachine{
			runLimit:     49999,
			deferredCost: -9,
			dataStack:    [][]byte{},
		},
	}, {
		op: OP_VERIFY,
		startVM: &virtualMachine{
			runLimit:     50000,
			deferredCost: 0,
			dataStack:    [][]byte{{1, 1}},
		},
		wantErr: nil,
		wantVM: &virtualMachine{
			runLimit:     49999,
			deferredCost: -10,
			dataStack:    [][]byte{},
		},
	}, {
		// VERIFY with a false (empty) value fails the VM.
		op: OP_VERIFY,
		startVM: &virtualMachine{
			runLimit:     50000,
			deferredCost: 0,
			dataStack:    [][]byte{{}},
		},
		wantErr: ErrVerifyFailed,
	}, {
		startVM: &virtualMachine{runLimit: 50000},
		op:      OP_FAIL,
		wantErr: ErrReturn,
	}, {
		// CHECKPREDICATE with a trivially-true predicate.
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{}, {byte(OP_TRUE)}, {}},
		},
		wantVM: &virtualMachine{
			runLimit:     0,
			deferredCost: -49951,
			dataStack:    [][]byte{{1}},
		},
	}, {
		// Empty predicate yields a false result.
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{}, {}, {}},
		},
		wantVM: &virtualMachine{
			runLimit:     0,
			deferredCost: -49952,
			dataStack:    [][]byte{{}},
		},
	}, {
		// Failing predicate yields a false result, not an error.
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{}, {byte(OP_FAIL)}, {}},
		},
		wantVM: &virtualMachine{
			runLimit:     0,
			deferredCost: -49952,
			dataStack:    [][]byte{{}},
		},
	}, {
		// Negative run limit for the child VM is rejected.
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{}, {}, Int64Bytes(-1)},
		},
		wantErr: ErrBadValue,
	}, {
		// Child limit exceeding the parent's remaining run limit.
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{}, {}, Int64Bytes(50000)},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		// Predicate consumes two moved stack items: 5+7 == 12.
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0x05}, {0x07}, {0x02}, {byte(OP_ADD), byte(OP_12), byte(OP_NUMEQUAL)}, {}},
		},
		wantVM: &virtualMachine{
			deferredCost: -49968,
			dataStack:    [][]byte{{0x01}},
		},
	}, {
		// stack underflow in child vm should produce false result in parent vm
		op: OP_CHECKPREDICATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0x05}, {0x07}, {0x01}, {byte(OP_ADD), byte(OP_DATA_12), byte(OP_NUMEQUAL)}, {}},
		},
		wantVM: &virtualMachine{
			deferredCost: -49954,
			dataStack:    [][]byte{{0x05}, {}},
		},
	}}

	// Every control op must fail cleanly when the run limit is spent.
	limitChecks := []Op{
		OP_CHECKPREDICATE, OP_VERIFY, OP_FAIL,
	}

	for _, op := range limitChecks {
		cases = append(cases, testStruct{
			op:      op,
			startVM: &virtualMachine{runLimit: 0},
			wantErr: ErrRunLimitExceeded,
		})
	}

	for i, c := range cases {
		err := ops[c.op].fn(c.startVM)

		if err != c.wantErr {
			t.Errorf("case %d, op %s: got err = %v want %v", i, c.op.String(), err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			continue
		}

		if !testutil.DeepEqual(c.startVM, c.wantVM) {
			t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, c.op.String(), c.startVM, c.wantVM)
		}
	}
}
diff --git a/protocol/vm/crypto.go b/protocol/vm/crypto.go
new file mode 100644 (file)
index 0000000..1346dbc
--- /dev/null
@@ -0,0 +1,149 @@
+package vm
+
+import (
+       "crypto/sha256"
+       "hash"
+
+       "golang.org/x/crypto/sha3"
+
+       "chain/crypto/ed25519"
+       "chain/math/checked"
+)
+
// opSha256 replaces the top data-stack item with its SHA-256 digest.
func opSha256(vm *virtualMachine) error {
	return doHash(vm, sha256.New)
}
+
// opSha3 replaces the top data-stack item with its SHA3-256 digest.
func opSha3(vm *virtualMachine) error {
	return doHash(vm, sha3.New256)
}
+
+func doHash(vm *virtualMachine, hashFactory func() hash.Hash) error {
+       x, err := vm.pop(false)
+       if err != nil {
+               return err
+       }
+       cost := int64(len(x))
+       if cost < 64 {
+               cost = 64
+       }
+       err = vm.applyCost(cost)
+       if err != nil {
+               return err
+       }
+       h := hashFactory()
+       _, err = h.Write(x)
+       if err != nil {
+               return err
+       }
+       return vm.push(h.Sum(nil), false)
+}
+
+func opCheckSig(vm *virtualMachine) error {
+       err := vm.applyCost(1024)
+       if err != nil {
+               return err
+       }
+       pubkeyBytes, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       msg, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       sig, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       if len(msg) != 32 {
+               return ErrBadValue
+       }
+       if len(pubkeyBytes) != ed25519.PublicKeySize {
+               return vm.pushBool(false, true)
+       }
+       return vm.pushBool(ed25519.Verify(ed25519.PublicKey(pubkeyBytes), msg, sig), true)
+}
+
+func opCheckMultiSig(vm *virtualMachine) error {
+       numPubkeys, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       pubCost, ok := checked.MulInt64(numPubkeys, 1024)
+       if numPubkeys < 0 || !ok {
+               return ErrBadValue
+       }
+       err = vm.applyCost(pubCost)
+       if err != nil {
+               return err
+       }
+       numSigs, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if numSigs < 0 || numSigs > numPubkeys || (numPubkeys > 0 && numSigs == 0) {
+               return ErrBadValue
+       }
+       pubkeyByteses := make([][]byte, 0, numPubkeys)
+       for i := int64(0); i < numPubkeys; i++ {
+               pubkeyBytes, err := vm.pop(true)
+               if err != nil {
+                       return err
+               }
+               pubkeyByteses = append(pubkeyByteses, pubkeyBytes)
+       }
+       msg, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       if len(msg) != 32 {
+               return ErrBadValue
+       }
+       sigs := make([][]byte, 0, numSigs)
+       for i := int64(0); i < numSigs; i++ {
+               sig, err := vm.pop(true)
+               if err != nil {
+                       return err
+               }
+               sigs = append(sigs, sig)
+       }
+
+       pubkeys := make([]ed25519.PublicKey, 0, numPubkeys)
+       for _, p := range pubkeyByteses {
+               if len(p) != ed25519.PublicKeySize {
+                       return vm.pushBool(false, true)
+               }
+               pubkeys = append(pubkeys, ed25519.PublicKey(p))
+       }
+
+       for len(sigs) > 0 && len(pubkeys) > 0 {
+               if ed25519.Verify(pubkeys[0], msg, sigs[0]) {
+                       sigs = sigs[1:]
+               }
+               pubkeys = pubkeys[1:]
+       }
+       return vm.pushBool(len(sigs) == 0, true)
+}
+
+func opTxSigHash(vm *virtualMachine) error {
+       err := vm.applyCost(256)
+       if err != nil {
+               return err
+       }
+       if vm.context.TxSigHash == nil {
+               return ErrContext
+       }
+       return vm.push(vm.context.TxSigHash(), false)
+}
+
+func opBlockHash(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       if vm.context.BlockHash == nil {
+               return ErrContext
+       }
+       return vm.push(*vm.context.BlockHash, false)
+}
diff --git a/protocol/vm/crypto_test.go b/protocol/vm/crypto_test.go
new file mode 100644 (file)
index 0000000..c9efc78
--- /dev/null
@@ -0,0 +1,462 @@
+package vm
+
+import (
+       "testing"
+
+       "chain/testutil"
+)
+
// emptyBlockVMContext is a minimal execution context carrying only a
// fixed block hash — presumably the hash of an empty block; confirm
// against the block-serialization tests. It lets block-scoped opcodes
// (e.g. BLOCKHASH) run in these tests without a full block.
var emptyBlockVMContext = &Context{
	BlockHash: &[]uint8{0xf0, 0x85, 0x4f, 0x88, 0xb4, 0x89, 0x0, 0x99, 0x2f, 0xec, 0x40, 0x43, 0xf9, 0x65, 0xfa, 0x2, 0x9d, 0xeb, 0x8a, 0xd6, 0x93, 0xcf, 0x37, 0x11, 0xfe, 0x83, 0x9, 0xb3, 0x90, 0x6a, 0x5a, 0x86},
}
+
// TestCheckSig assembles and runs small programs ending in CHECKSIG or
// CHECKMULTISIG. Each case expects one of three outcomes: ok (program
// verifies), err (the VM returns an error, e.g. for a malformed
// message), or neither (the VM finishes but leaves a false result).
func TestCheckSig(t *testing.T) {
	cases := []struct {
		prog    string
		ok, err bool
	}{
		{
			// This one's OK
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 CHECKSIG",
			true, false,
		},
		{
			// This one has a wrong-length signature
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc2 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 CHECKSIG",
			false, false,
		},
		{
			// This one has a wrong-length message
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 CHECKSIG",
			false, true,
		},
		{
			// This one has a wrong-length pubkey
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584 CHECKSIG",
			false, false,
		},
		{
			// This one has a wrong byte in the signature
			"0x00ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 CHECKSIG",
			false, false,
		},
		{
			// This one has a wrong byte in the message
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0002030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 CHECKSIG",
			false, false,
		},
		{
			// This one has a wrong byte in the pubkey
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0x00ca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 CHECKSIG",
			false, false,
		},
		{
			// Garbage sig/pubkey with a valid-length message: false result.
			"0x010203 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0x040506 1 1 CHECKMULTISIG",
			false, false,
		},
		{
			// Wrong-length message is an error for CHECKMULTISIG too.
			"0x010203 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 0x040506 1 1 CHECKMULTISIG",
			false, true,
		},
		{
			// Valid 1-of-1 CHECKMULTISIG.
			"0x26ced30b1942b89ef5332a9f22f1a61e5a6a3f8a5bc33b2fc58b1daf78c81bf1d5c8add19cea050adeb37da3a7bf8f813c6a6922b42934a6441fa6bb1c7fc208 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 0xdbca6fb13badb7cfdf76510070ffad15b85f9934224a9e11202f5e8f86b584a6 1 1 CHECKMULTISIG",
			true, false,
		},
	}

	for i, c := range cases {
		prog, err := Assemble(c.prog)
		if err != nil {
			t.Fatalf("case %d: %s", i, err)
		}
		vm := &virtualMachine{
			program:  prog,
			runLimit: 50000,
		}
		err = vm.run()
		if c.err {
			if err == nil {
				t.Errorf("case %d: expected error, got ok result", i)
			}
		} else if c.ok {
			if err != nil {
				t.Errorf("case %d: expected ok result, got error %s", i, err)
			}
		} else if !vm.falseResult() {
			t.Errorf("case %d: expected false VM result, got error %s", i, err)
		}
	}
}
+
+func TestCryptoOps(t *testing.T) {
+       type testStruct struct {
+               op      Op
+               startVM *virtualMachine
+               wantErr error
+               wantVM  *virtualMachine
+       }
+       cases := []testStruct{{
+               op: OP_SHA256,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit: 49905,
+                       dataStack: [][]byte{{
+                               75, 245, 18, 47, 52, 69, 84, 197, 59, 222, 46, 187, 140, 210, 183, 227,
+                               209, 96, 10, 214, 49, 195, 133, 165, 215, 204, 226, 60, 119, 133, 69, 154,
+                       }},
+               },
+       }, {
+               op: OP_SHA256,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{make([]byte, 65)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit: 49968,
+                       dataStack: [][]byte{{
+                               152, 206, 66, 222, 239, 81, 212, 2, 105, 213, 66, 245, 49, 75, 239, 44,
+                               116, 104, 212, 1, 173, 93, 133, 22, 139, 250, 180, 192, 16, 143, 117, 247,
+                       }},
+               },
+       }, {
+               op: OP_SHA3,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit: 49905,
+                       dataStack: [][]byte{{
+                               39, 103, 241, 92, 138, 242, 242, 199, 34, 93, 82, 115, 253, 214, 131, 237,
+                               199, 20, 17, 10, 152, 125, 16, 84, 105, 124, 52, 138, 237, 78, 108, 199,
+                       }},
+               },
+       }, {
+               op: OP_SHA3,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{make([]byte, 65)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit: 49968,
+                       dataStack: [][]byte{{
+                               65, 106, 167, 181, 192, 224, 101, 48, 102, 167, 198, 77, 189, 208, 0, 157,
+                               190, 132, 56, 97, 81, 254, 3, 159, 217, 66, 250, 162, 219, 97, 114, 235,
+                       }},
+               },
+       }, {
+               op: OP_CHECKSIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851" +
+                                       "fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                       },
+               },
+               wantVM: &virtualMachine{
+                       deferredCost: -143,
+                       runLimit:     48976,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_CHECKSIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851" +
+                                       "fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("badda7a7a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                       },
+               },
+               wantVM: &virtualMachine{
+                       deferredCost: -144,
+                       runLimit:     48976,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_CHECKSIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851" +
+                                       "fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("bad220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                       },
+               },
+               wantVM: &virtualMachine{
+                       deferredCost: -144,
+                       runLimit:     48976,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_CHECKSIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("badabdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851" +
+                                       "fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                       },
+               },
+               wantVM: &virtualMachine{
+                       deferredCost: -144,
+                       runLimit:     48976,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_CHECKSIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851" +
+                                       "fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("badbad"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                       },
+               },
+               wantErr: ErrBadValue,
+       }, {
+               op: OP_CHECKSIG,
+               startVM: &virtualMachine{
+                       runLimit: 0,
+               },
+               wantErr: ErrRunLimitExceeded,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {1},
+                               {1},
+                       },
+               },
+               wantVM: &virtualMachine{
+                       deferredCost: -161,
+                       runLimit:     48976,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("badabdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {1},
+                               {1},
+                       },
+               },
+               wantVM: &virtualMachine{
+                       deferredCost: -162,
+                       runLimit:     48976,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{},
+               },
+               wantErr: ErrDataStackUnderflow,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               {1},
+                               {1},
+                       },
+               },
+               wantErr: ErrDataStackUnderflow,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               {1},
+                               {1},
+                       },
+               },
+               wantErr: ErrDataStackUnderflow,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               {1},
+                               {1},
+                       },
+               },
+               wantErr: ErrDataStackUnderflow,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("badbad"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {1},
+                               {1},
+                       },
+               },
+               wantErr: ErrBadValue,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {1},
+                               {0},
+                       },
+               },
+               wantErr: ErrBadValue,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {0},
+                               {1},
+                       },
+               },
+               wantErr: ErrBadValue,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {2},
+                               {1},
+                       },
+               },
+               wantErr: ErrBadValue,
+       }, {
+               op: OP_CHECKMULTISIG,
+               startVM: &virtualMachine{
+                       runLimit: 0,
+                       dataStack: [][]byte{
+                               mustDecodeHex("af5abdf4bbb34f4a089efc298234f84fd909def662a8df03b4d7d40372728851fbd3bf59920af5a7c361a4851967714271d1727e3be417a60053c30969d8860c"),
+                               mustDecodeHex("916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9"),
+                               mustDecodeHex("ab3220d065dc875c6a5b4ecc39809b5f24eb0a605e9eef5190457edbf1e3b866"),
+                               {1},
+                               {1},
+                       },
+               },
+               wantErr: ErrRunLimitExceeded,
+       }, {
+               op: OP_TXSIGHASH,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       context: &Context{
+                               TxSigHash: func() []byte {
+                                       return []byte{
+                                               0x2f, 0x00, 0x3c, 0xdd, 0x64, 0x42, 0x7b, 0x5e,
+                                               0xed, 0xd6, 0xcc, 0xb5, 0x85, 0x47, 0x02, 0x0b,
+                                               0x02, 0xde, 0xf2, 0x2d, 0xc5, 0x99, 0x7e, 0x9d,
+                                               0xa9, 0xac, 0x40, 0x49, 0xc3, 0x4a, 0x58, 0xd8,
+                                       }
+                               },
+                       },
+               },
+               wantVM: &virtualMachine{
+                       runLimit: 49704,
+                       dataStack: [][]byte{{
+                               47, 0, 60, 221, 100, 66, 123, 94,
+                               237, 214, 204, 181, 133, 71, 2, 11,
+                               2, 222, 242, 45, 197, 153, 126, 157,
+                               169, 172, 64, 73, 195, 74, 88, 216,
+                       }},
+               },
+       }, {
+               op: OP_TXSIGHASH,
+               startVM: &virtualMachine{
+                       runLimit: 0,
+                       context:  &Context{},
+               },
+               wantErr: ErrRunLimitExceeded,
+       }, {
+               op: OP_BLOCKHASH,
+               startVM: &virtualMachine{
+                       runLimit: 50000,
+                       context:  emptyBlockVMContext,
+               },
+               wantVM: &virtualMachine{
+                       runLimit: 49959,
+                       dataStack: [][]byte{{
+                               240, 133, 79, 136, 180, 137, 0, 153,
+                               47, 236, 64, 67, 249, 101, 250, 2,
+                               157, 235, 138, 214, 147, 207, 55, 17,
+                               254, 131, 9, 179, 144, 106, 90, 134,
+                       }},
+                       context: emptyBlockVMContext,
+               },
+       }, {
+               op: OP_BLOCKHASH,
+               startVM: &virtualMachine{
+                       runLimit: 0,
+                       context:  emptyBlockVMContext,
+               },
+               wantErr: ErrRunLimitExceeded,
+       }}
+
+       hashOps := []Op{OP_SHA256, OP_SHA3}
+       for _, op := range hashOps {
+               cases = append(cases, testStruct{
+                       op: op,
+                       startVM: &virtualMachine{
+                               runLimit:  0,
+                               dataStack: [][]byte{{1}},
+                       },
+                       wantErr: ErrRunLimitExceeded,
+               })
+       }
+
+       for i, c := range cases {
+               t.Logf("case %d", i)
+
+               err := ops[c.op].fn(c.startVM)
+               gotVM := c.startVM
+
+               if err != c.wantErr {
+                       t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
+                       continue
+               }
+               if c.wantErr != nil {
+                       continue
+               }
+
+               // Hack: the context objects will otherwise compare unequal
+               // sometimes (because of the function pointer within?) and we
+               // don't care
+               c.wantVM.context = gotVM.context
+
+               if !testutil.DeepEqual(gotVM, c.wantVM) {
+                       t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, ops[c.op].name, gotVM, c.wantVM)
+               }
+       }
+}
diff --git a/protocol/vm/doc.go b/protocol/vm/doc.go
new file mode 100644 (file)
index 0000000..78b3940
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+Package vm implements the VM described in Chain Protocol 1.
+
+The VM is for verifying transaction inputs and blocks. Accordingly
+there are two main entrypoints: VerifyTxInput and VerifyBlockHeader,
+both in vm.go. Each constructs a disposable VM object to perform its
+computation.
+
+For VerifyTxInput, the program to execute comes from the input
+commitment: either the prevout's control program, if it's a spend
+input; or the issuance program, if it's an issuance. For
+VerifyBlockHeader, the program to execute is the previous block's
+consensus program.  In all cases, the VM's data stack is first
+populated with witness data from the current object (transaction input
+or block).
+
+The program is interpreted byte-by-byte by the main loop in
+virtualMachine.run(). Most bytes are opcodes in one of the following categories:
+  - bitwise
+  - control
+  - crypto
+  - introspection
+  - numeric
+  - pushdata
+  - splice
+  - stack
+Each category has a corresponding .go file implementing those opcodes.
+
+Each instruction incurs some cost when executed. These costs are
+deducted from (and in some cases refunded to) a predefined run
+limit. Costs are tallied in two conceptual phases: "before" the
+instruction runs and "after." In practice, "before" charges are
+applied on the fly in the body of each opcode's implementation, and
+"after" charges are deferred until the instruction finishes, at which
+point the VM main loop applies the deferred charges. As such,
+functions that have associated costs (chiefly stack pushing and
+popping) include a "deferred" flag as an argument.
+*/
+package vm
diff --git a/protocol/vm/errors.go b/protocol/vm/errors.go
new file mode 100644 (file)
index 0000000..d70c03b
--- /dev/null
@@ -0,0 +1,22 @@
+package vm
+
+import "errors"
+
+// Errors returned by the VM entrypoints and opcode implementations.
+// Callers and tests compare returned errors against these values by
+// identity, so they must remain stable package-level sentinels.
+var (
+	ErrAltStackUnderflow  = errors.New("alt stack underflow")
+	ErrBadValue           = errors.New("bad value")
+	ErrContext            = errors.New("wrong context")
+	ErrDataStackUnderflow = errors.New("data stack underflow")
+	ErrDisallowedOpcode   = errors.New("disallowed opcode")
+	ErrDivZero            = errors.New("division by zero")
+	ErrLongProgram        = errors.New("program size exceeds maxint32")
+	ErrRange              = errors.New("range error")
+	ErrReturn             = errors.New("RETURN executed")
+	ErrRunLimitExceeded   = errors.New("run limit exceeded")
+	ErrShortProgram       = errors.New("unexpected end of program")
+	ErrToken              = errors.New("unrecognized token")
+	ErrUnexpected         = errors.New("unexpected error")
+	ErrUnsupportedTx      = errors.New("unsupported transaction type")
+	ErrUnsupportedVM      = errors.New("unsupported VM")
+	ErrVerifyFailed       = errors.New("VERIFY failed")
+)
diff --git a/protocol/vm/introspection.go b/protocol/vm/introspection.go
new file mode 100644 (file)
index 0000000..d1a525c
--- /dev/null
@@ -0,0 +1,209 @@
+package vm
+
+import "math"
+
+// opCheckOutput pops six arguments from the data stack — code, VM
+// version, asset ID, amount, data and output index, top to bottom —
+// and pushes a boolean reporting whether the transaction output at
+// that index matches them. The actual comparison is delegated to the
+// context's CheckOutput callback.
+func opCheckOutput(vm *virtualMachine) error {
+	err := vm.applyCost(16)
+	if err != nil {
+		return err
+	}
+
+	// Pop order is the reverse of the order the program pushed the
+	// arguments: code is on top, index at the bottom.
+	code, err := vm.pop(true)
+	if err != nil {
+		return err
+	}
+	vmVersion, err := vm.popInt64(true)
+	if err != nil {
+		return err
+	}
+	// Negative versions, amounts and indexes are rejected before the
+	// conversions to uint64 below.
+	if vmVersion < 0 {
+		return ErrBadValue
+	}
+	assetID, err := vm.pop(true)
+	if err != nil {
+		return err
+	}
+	amount, err := vm.popInt64(true)
+	if err != nil {
+		return err
+	}
+	if amount < 0 {
+		return ErrBadValue
+	}
+	data, err := vm.pop(true)
+	if err != nil {
+		return err
+	}
+	index, err := vm.popInt64(true)
+	if err != nil {
+		return err
+	}
+	if index < 0 {
+		return ErrBadValue
+	}
+
+	// A nil CheckOutput means this context doesn't support output
+	// introspection (NOTE(review): presumably a block context — confirm
+	// against the Context constructors); treat it as the wrong context.
+	if vm.context.CheckOutput == nil {
+		return ErrContext
+	}
+
+	ok, err := vm.context.CheckOutput(uint64(index), data, uint64(amount), assetID, uint64(vmVersion), code, vm.expansionReserved)
+	if err != nil {
+		return err
+	}
+	return vm.pushBool(ok, true)
+}
+
+func opAsset(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.AssetID == nil {
+               return ErrContext
+       }
+       return vm.push(*vm.context.AssetID, true)
+}
+
+func opAmount(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.Amount == nil {
+               return ErrContext
+       }
+       return vm.pushInt64(int64(*vm.context.Amount), true)
+}
+
+func opProgram(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       return vm.push(vm.context.Code, true)
+}
+
+func opMinTime(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.MinTimeMS == nil {
+               return ErrContext
+       }
+       return vm.pushInt64(int64(*vm.context.MinTimeMS), true)
+}
+
+// opMaxTime pushes the context's maximum timestamp (milliseconds) as a
+// VM integer, failing with ErrContext when it is absent.
+func opMaxTime(vm *virtualMachine) error {
+	err := vm.applyCost(1)
+	if err != nil {
+		return err
+	}
+
+	if vm.context.MaxTimeMS == nil {
+		return ErrContext
+	}
+	maxTimeMS := *vm.context.MaxTimeMS
+	// Clamp so the int64 conversion below can't overflow. Zero is also
+	// mapped to MaxInt64 (NOTE(review): zero apparently encodes "no
+	// maximum" — confirm against the transaction validation code).
+	if maxTimeMS == 0 || maxTimeMS > math.MaxInt64 {
+		maxTimeMS = uint64(math.MaxInt64)
+	}
+
+	return vm.pushInt64(int64(maxTimeMS), true)
+}
+
+func opEntryData(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.EntryData == nil {
+               return ErrContext
+       }
+
+       return vm.push(*vm.context.EntryData, true)
+}
+
+func opTxData(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.TxData == nil {
+               return ErrContext
+       }
+       return vm.push(*vm.context.TxData, true)
+}
+
+func opIndex(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.DestPos == nil {
+               return ErrContext
+       }
+       return vm.pushInt64(int64(*vm.context.DestPos), true)
+}
+
+func opEntryID(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       return vm.push(vm.context.EntryID, true)
+}
+
+func opOutputID(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.SpentOutputID == nil {
+               return ErrContext
+       }
+       return vm.push(*vm.context.SpentOutputID, true)
+}
+
+func opNonce(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.AnchorID == nil {
+               return ErrContext
+       }
+       return vm.push(*vm.context.AnchorID, true)
+}
+
+func opNextProgram(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.NextConsensusProgram == nil {
+               return ErrContext
+       }
+       return vm.push(*vm.context.NextConsensusProgram, true)
+}
+
+func opBlockTime(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+
+       if vm.context.BlockTimeMS == nil {
+               return ErrContext
+       }
+       return vm.pushInt64(int64(*vm.context.BlockTimeMS), true)
+}
diff --git a/protocol/vm/introspection_test.go b/protocol/vm/introspection_test.go
new file mode 100644 (file)
index 0000000..c7a86fd
--- /dev/null
@@ -0,0 +1,424 @@
+package vm
+
+import (
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+
+       "chain/errors"
+       "chain/testutil"
+)
+
+// TestNextProgram exercises OP_NEXTPROGRAM via the assembler: once with
+// a program that matches the context's next consensus program, and once
+// with one that doesn't.
+func TestNextProgram(t *testing.T) {
+	context := &Context{
+		NextConsensusProgram: &[]byte{1, 2, 3},
+	}
+
+	prog, err := Assemble("NEXTPROGRAM 0x010203 EQUAL")
+	if err != nil {
+		t.Fatal(err)
+	}
+	vm := &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  context,
+	}
+	err = vm.run()
+	if err != nil {
+		t.Errorf("got error %s, expected none", err)
+	}
+
+	prog, err = Assemble("NEXTPROGRAM 0x0102 EQUAL")
+	if err != nil {
+		t.Fatal(err)
+	}
+	vm = &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  context,
+	}
+	err = vm.run()
+	// A clean run that leaves a false value on the stack is folded into
+	// ErrFalseVMResult so the switch below covers both failure modes.
+	if err == nil && vm.falseResult() {
+		err = ErrFalseVMResult
+	}
+	switch err {
+	case nil:
+		t.Error("got ok result, expected failure")
+	case ErrFalseVMResult:
+		// ok
+	default:
+		t.Errorf("got error %s, expected ErrFalseVMResult", err)
+	}
+}
+
+// TestBlockTime exercises OP_BLOCKTIME via the assembler against a
+// matching and a non-matching expected timestamp.
+func TestBlockTime(t *testing.T) {
+	var blockTimeMS uint64 = 3263826
+
+	prog, err := Assemble("BLOCKTIME 3263826 NUMEQUAL")
+	if err != nil {
+		t.Fatal(err)
+	}
+	vm := &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  &Context{BlockTimeMS: &blockTimeMS},
+	}
+	err = vm.run()
+	if err != nil {
+		t.Errorf("got error %s, expected none", err)
+	}
+	if vm.falseResult() {
+		t.Error("result is false, want success")
+	}
+
+	prog, err = Assemble("BLOCKTIME 3263827 NUMEQUAL")
+	if err != nil {
+		t.Fatal(err)
+	}
+	vm = &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  &Context{BlockTimeMS: &blockTimeMS},
+	}
+	err = vm.run()
+	// A clean run with a false top-of-stack is folded into
+	// ErrFalseVMResult so the switch below covers both failure modes.
+	if err == nil && vm.falseResult() {
+		err = ErrFalseVMResult
+	}
+	switch err {
+	case nil:
+		t.Error("got ok result, expected failure")
+	case ErrFalseVMResult:
+		// ok
+	default:
+		t.Errorf("got error %s, expected ErrFalseVMResult", err)
+	}
+}
+
+// TestOutputIDAndNonceOp single-steps OP_OUTPUTID and OP_NONCE, both
+// with and without the corresponding context field, checking the
+// resulting data stack and the ErrContext failure paths.
+func TestOutputIDAndNonceOp(t *testing.T) {
+	// arbitrary
+	outputID := mustDecodeHex("0a60f9b12950c84c221012a808ef7782823b7e16b71fe2ba01811cda96a217df")
+	nonceID := mustDecodeHex("c4a6e6256debfca379595e444b91af56846397e8007ea87c40c622170dd13ff7")
+
+	prog := []byte{uint8(OP_OUTPUTID)}
+	vm := &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  &Context{SpentOutputID: &outputID},
+	}
+	err := vm.step()
+	if err != nil {
+		t.Fatal(err)
+	}
+	gotVM := vm
+
+	expectedStack := [][]byte{outputID}
+	if !testutil.DeepEqual(gotVM.dataStack, expectedStack) {
+		t.Errorf("expected stack %v, got %v; vm is:\n%s", expectedStack, gotVM.dataStack, spew.Sdump(vm))
+	}
+
+	// With no spent output in the context, OP_OUTPUTID must fail.
+	prog = []byte{uint8(OP_OUTPUTID)}
+	vm = &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  &Context{SpentOutputID: nil},
+	}
+	err = vm.step()
+	if err != ErrContext {
+		t.Errorf("expected ErrContext, got %v", err)
+	}
+
+	// Likewise OP_NONCE with no anchor ID.
+	prog = []byte{uint8(OP_NONCE)}
+	vm = &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  &Context{AnchorID: nil},
+	}
+	err = vm.step()
+	if err != ErrContext {
+		t.Errorf("expected ErrContext, got %v", err)
+	}
+
+	prog = []byte{uint8(OP_NONCE)}
+	vm = &virtualMachine{
+		runLimit: 50000,
+		program:  prog,
+		context:  &Context{AnchorID: &nonceID},
+	}
+	err = vm.step()
+	if err != nil {
+		t.Fatal(err)
+	}
+	gotVM = vm
+
+	expectedStack = [][]byte{nonceID}
+	if !testutil.DeepEqual(gotVM.dataStack, expectedStack) {
+		t.Errorf("expected stack %v, got %v", expectedStack, gotVM.dataStack)
+	}
+}
+
+// TestIntrospectionOps table-tests the introspection opcodes
+// (CHECKOUTPUT, ASSET, AMOUNT, PROGRAM, MINTIME, MAXTIME, TXDATA,
+// ENTRYDATA, INDEX, ENTRYID), checking both the resulting VM state
+// (run limit, deferred cost, data stack) and the expected errors.
+func TestIntrospectionOps(t *testing.T) {
+	// arbitrary
+	entryID := mustDecodeHex("2e68d78cdeaa98944c12512cf9c719eb4881e9afb61e4b766df5f369aee6392c")
+	entryData := mustDecodeHex("44be5e14ce216f4b2c35a5eb0b35d078bda55cf05b5d36ee0e7a01fbc6ef62b7")
+	assetID := mustDecodeHex("0100000000000000000000000000000000000000000000000000000000000000")
+	txData := mustDecodeHex("3e5190f2691e6d451c50edf9a9a66a7a6779c787676452810dbf4f6e4053682c")
+
+	type testStruct struct {
+		op      Op
+		startVM *virtualMachine
+		wantErr error
+		wantVM  *virtualMachine
+	}
+	cases := []testStruct{{
+		op: OP_CHECKOUTPUT,
+		startVM: &virtualMachine{
+			dataStack: [][]byte{
+				{0},
+				[]byte{},
+				{1},
+				append([]byte{9}, make([]byte, 31)...),
+				{1},
+				[]byte("missingprog"),
+			},
+			context: &Context{
+				CheckOutput: func(uint64, []byte, uint64, []byte, uint64, []byte, bool) (bool, error) {
+					return false, nil
+				},
+			},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     50070,
+			deferredCost: -86,
+			dataStack:    [][]byte{{}},
+		},
+	}, {
+		op: OP_CHECKOUTPUT,
+		startVM: &virtualMachine{
+			dataStack: [][]byte{
+				{4},
+				mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
+				{7},
+				append([]byte{2}, make([]byte, 31)...),
+				Int64Bytes(-1),
+				[]byte("controlprog"),
+			},
+			context: &Context{},
+		},
+		wantErr: ErrBadValue,
+	}, {
+		op: OP_CHECKOUTPUT,
+		startVM: &virtualMachine{
+			dataStack: [][]byte{
+				{4},
+				mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
+				Int64Bytes(-1),
+				append([]byte{2}, make([]byte, 31)...),
+				{1},
+				[]byte("controlprog"),
+			},
+			context: &Context{},
+		},
+		wantErr: ErrBadValue,
+	}, {
+		op: OP_CHECKOUTPUT,
+		startVM: &virtualMachine{
+			dataStack: [][]byte{
+				Int64Bytes(-1),
+				mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
+				{7},
+				append([]byte{2}, make([]byte, 31)...),
+				{1},
+				[]byte("controlprog"),
+			},
+			context: &Context{},
+		},
+		wantErr: ErrBadValue,
+	}, {
+		op: OP_CHECKOUTPUT,
+		startVM: &virtualMachine{
+			dataStack: [][]byte{
+				{5},
+				mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
+				{7},
+				append([]byte{2}, make([]byte, 31)...),
+				{1},
+				[]byte("controlprog"),
+			},
+			context: &Context{
+				CheckOutput: func(uint64, []byte, uint64, []byte, uint64, []byte, bool) (bool, error) {
+					return false, ErrBadValue
+				},
+			},
+		},
+		wantErr: ErrBadValue,
+	}, {
+		op: OP_CHECKOUTPUT,
+		startVM: &virtualMachine{
+			runLimit: 0,
+			dataStack: [][]byte{
+				{4},
+				mustDecodeHex("1f2a05f881ed9fa0c9068a84823677409f863891a2196eb55dbfbb677a566374"),
+				{7},
+				append([]byte{2}, make([]byte, 31)...),
+				{1},
+				[]byte("controlprog"),
+			},
+			context: &Context{},
+		},
+		wantErr: ErrRunLimitExceeded,
+	}, {
+		op: OP_ASSET,
+		startVM: &virtualMachine{
+			context: &Context{AssetID: &assetID},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49959,
+			deferredCost: 40,
+			dataStack:    [][]byte{assetID},
+		},
+	}, {
+		op: OP_AMOUNT,
+		startVM: &virtualMachine{
+			context: &Context{Amount: uint64ptr(5)},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49990,
+			deferredCost: 9,
+			dataStack:    [][]byte{{5}},
+		},
+	}, {
+		op: OP_PROGRAM,
+		startVM: &virtualMachine{
+			program: []byte("spendprog"),
+			context: &Context{Code: []byte("spendprog")},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49982,
+			deferredCost: 17,
+			dataStack:    [][]byte{[]byte("spendprog")},
+		},
+	}, {
+		op: OP_PROGRAM,
+		startVM: &virtualMachine{
+			program:  []byte("issueprog"),
+			runLimit: 50000,
+			context:  &Context{Code: []byte("issueprog")},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49982,
+			deferredCost: 17,
+			dataStack:    [][]byte{[]byte("issueprog")},
+		},
+	}, {
+		op: OP_MINTIME,
+		startVM: &virtualMachine{
+			context: &Context{MinTimeMS: new(uint64)},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49991,
+			deferredCost: 8,
+			dataStack:    [][]byte{[]byte{}},
+		},
+	}, {
+		op: OP_MAXTIME,
+		startVM: &virtualMachine{
+			context: &Context{MaxTimeMS: uint64ptr(20)},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49990,
+			deferredCost: 9,
+			dataStack:    [][]byte{{20}},
+		},
+	}, {
+		op: OP_TXDATA,
+		startVM: &virtualMachine{
+			context: &Context{TxData: &txData},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49959,
+			deferredCost: 40,
+			dataStack:    [][]byte{txData},
+		},
+	}, {
+		op: OP_ENTRYDATA,
+		startVM: &virtualMachine{
+			context: &Context{EntryData: &entryData},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49959,
+			deferredCost: 40,
+			dataStack:    [][]byte{entryData},
+		},
+	}, {
+		op: OP_INDEX,
+		startVM: &virtualMachine{
+			context: &Context{DestPos: new(uint64)},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49991,
+			deferredCost: 8,
+			dataStack:    [][]byte{[]byte{}},
+		},
+	}, {
+		op: OP_ENTRYID,
+		startVM: &virtualMachine{
+			context: &Context{EntryID: entryID},
+		},
+		wantVM: &virtualMachine{
+			runLimit:     49959,
+			deferredCost: 40,
+			dataStack:    [][]byte{entryID},
+		},
+	}}
+
+	// Every transaction-introspection op must fail with
+	// ErrRunLimitExceeded when the run limit is exhausted.
+	txops := []Op{
+		OP_CHECKOUTPUT, OP_ASSET, OP_AMOUNT, OP_PROGRAM,
+		OP_MINTIME, OP_MAXTIME, OP_TXDATA, OP_ENTRYDATA,
+		OP_INDEX, OP_OUTPUTID,
+	}
+
+	for _, op := range txops {
+		cases = append(cases, testStruct{
+			op: op,
+			startVM: &virtualMachine{
+				runLimit: 0,
+				context:  &Context{},
+			},
+			wantErr: ErrRunLimitExceeded,
+		})
+	}
+
+	for i, c := range cases {
+		t.Logf("case %d", i)
+		prog := []byte{byte(c.op)}
+		vm := c.startVM
+		// Give each case a fresh budget unless the case is testing
+		// run-limit exhaustion itself.
+		if c.wantErr != ErrRunLimitExceeded {
+			vm.runLimit = 50000
+		}
+		vm.program = prog
+		err := vm.run()
+		switch errors.Root(err) {
+		case c.wantErr:
+			// ok
+		case nil:
+			t.Errorf("case %d, op %s: got no error, want %v", i, ops[c.op].name, c.wantErr)
+		default:
+			t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
+		}
+		if c.wantErr != nil {
+			continue
+		}
+		gotVM := vm
+
+		// Fill in the fields the expectation table leaves implicit.
+		c.wantVM.program = prog
+		c.wantVM.pc = 1
+		c.wantVM.nextPC = 1
+		c.wantVM.context = gotVM.context
+
+		if !testutil.DeepEqual(gotVM, c.wantVM) {
+			t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\nstartVM is:\n%s", i, ops[c.op].name, gotVM, c.wantVM, spew.Sdump(c.startVM))
+		}
+	}
+}
+
+func uint64ptr(n uint64) *uint64 { return &n }
diff --git a/protocol/vm/numeric.go b/protocol/vm/numeric.go
new file mode 100644 (file)
index 0000000..a478b33
--- /dev/null
@@ -0,0 +1,459 @@
+package vm
+
+import (
+       "math"
+
+       "chain/math/checked"
+)
+
+func op1Add(vm *virtualMachine) error { // OP_1ADD: pop int64 n, push n+1; ErrRange on int64 overflow.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.AddInt64(n, 1) // overflow-checked addition
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func op1Sub(vm *virtualMachine) error { // OP_1SUB: pop int64 n, push n-1; ErrRange on int64 underflow.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.SubInt64(n, 1) // overflow-checked subtraction
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func op2Mul(vm *virtualMachine) error { // OP_2MUL: pop int64 n, push n*2; ErrRange on int64 overflow.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.MulInt64(n, 2) // overflow-checked multiplication
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func op2Div(vm *virtualMachine) error { // OP_2DIV: pop int64 n, push n>>1.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       return vm.pushInt64(n>>1, true) // arithmetic shift: rounds toward -inf, so -1>>1 == -1 (cannot overflow)
+}
+
+func opNegate(vm *virtualMachine) error { // OP_NEGATE: pop int64 n, push -n; ErrRange for math.MinInt64.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.NegateInt64(n) // -MinInt64 is not representable
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func opAbs(vm *virtualMachine) error { // OP_ABS: pop int64 n, push |n|; ErrRange for math.MinInt64.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if n == math.MinInt64 { // |MinInt64| overflows int64
+               return ErrRange
+       }
+       if n < 0 {
+               n = -n
+       }
+       return vm.pushInt64(n, true)
+}
+
+func opNot(vm *virtualMachine) error { // OP_NOT: pop int64 n, push true iff n == 0.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(n == 0, true)
+}
+
+func op0NotEqual(vm *virtualMachine) error { // OP_0NOTEQUAL: pop int64 n, push true iff n != 0.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(n != 0, true)
+}
+
+func opAdd(vm *virtualMachine) error { // OP_ADD: pop y then x, push x+y; ErrRange on int64 overflow.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // top of stack is the second operand
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.AddInt64(x, y)
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func opSub(vm *virtualMachine) error { // OP_SUB: pop y then x, push x-y; ErrRange on int64 overflow.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // subtrahend is on top
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.SubInt64(x, y)
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func opMul(vm *virtualMachine) error { // OP_MUL: pop y then x, push x*y; ErrRange on int64 overflow. Costs 8 (vs 2 for add/sub).
+       err := vm.applyCost(8)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       res, ok := checked.MulInt64(x, y)
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func opDiv(vm *virtualMachine) error { // OP_DIV: pop y then x, push x/y; ErrDivZero when y==0, ErrRange for MinInt64/-1.
+       err := vm.applyCost(8)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // divisor is on top
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if y == 0 {
+               return ErrDivZero
+       }
+       res, ok := checked.DivInt64(x, y) // only MinInt64 / -1 overflows
+       if !ok {
+               return ErrRange
+       }
+       return vm.pushInt64(res, true)
+}
+
+func opMod(vm *virtualMachine) error { // OP_MOD: pop y then x, push x mod y with the result taking the sign of y.
+       err := vm.applyCost(8)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // modulus is on top
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if y == 0 {
+               return ErrDivZero
+       }
+
+       res, ok := checked.ModInt64(x, y) // only MinInt64 % -1 overflows
+       if !ok {
+               return ErrRange
+       }
+
+       // Go's modulus operator produces the wrong result for mixed-sign
+       // operands
+       if res != 0 && (x >= 0) != (y >= 0) { // shift into [0, y) / (y, 0] so e.g. -12 mod 10 == 8
+               res += y
+       }
+
+       return vm.pushInt64(res, true)
+}
+
+func opLshift(vm *virtualMachine) error { // OP_LSHIFT: pop y then x, push x<<y; ErrBadValue for y<0, ErrRange on overflow.
+       err := vm.applyCost(8)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // shift count is on top; validated before x is popped
+       if err != nil {
+               return err
+       }
+       if y < 0 {
+               return ErrBadValue
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if x == 0 || y == 0 { // shortcut: result is x itself, and avoids overflow checks for huge y when x==0
+               return vm.pushInt64(x, true)
+       }
+
+       res, ok := checked.LshiftInt64(x, y)
+       if !ok {
+               return ErrRange
+       }
+
+       return vm.pushInt64(res, true)
+}
+
+func opRshift(vm *virtualMachine) error { // OP_RSHIFT: pop y then x, push x>>y; ErrBadValue for y<0.
+       err := vm.applyCost(8)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // shift count is on top
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if y < 0 {
+               return ErrBadValue
+       }
+       return vm.pushInt64(x>>uint64(y), true) // arithmetic shift: sign-extends negative x; y >= 64 yields 0 or -1
+}
+
+func opBoolAnd(vm *virtualMachine) error { // OP_BOOLAND: pop two items, push their logical AND (truthiness via AsBool).
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(AsBool(a) && AsBool(b), true)
+}
+
+func opBoolOr(vm *virtualMachine) error { // OP_BOOLOR: pop two items, push their logical OR (truthiness via AsBool).
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(AsBool(a) || AsBool(b), true)
+}
+
+const ( // comparison selectors dispatched by doNumCompare
+       cmpLess = iota
+       cmpLessEqual
+       cmpGreater
+       cmpGreaterEqual
+       cmpEqual
+       cmpNotEqual
+)
+
+func opNumEqual(vm *virtualMachine) error { // OP_NUMEQUAL: push x == y.
+       return doNumCompare(vm, cmpEqual)
+}
+
+func opNumEqualVerify(vm *virtualMachine) error { // OP_NUMEQUALVERIFY: pop two int64s; nil when equal, else ErrVerifyFailed (pushes nothing).
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if x == y {
+               return nil
+       }
+       return ErrVerifyFailed
+}
+
+func opNumNotEqual(vm *virtualMachine) error { // OP_NUMNOTEQUAL: push x != y.
+       return doNumCompare(vm, cmpNotEqual)
+}
+
+func opLessThan(vm *virtualMachine) error { // OP_LESSTHAN: push x < y.
+       return doNumCompare(vm, cmpLess)
+}
+
+func opGreaterThan(vm *virtualMachine) error { // OP_GREATERTHAN: push x > y.
+       return doNumCompare(vm, cmpGreater)
+}
+
+func opLessThanOrEqual(vm *virtualMachine) error { // OP_LESSTHANOREQUAL: push x <= y.
+       return doNumCompare(vm, cmpLessEqual)
+}
+
+func opGreaterThanOrEqual(vm *virtualMachine) error { // OP_GREATERTHANOREQUAL: push x >= y.
+       return doNumCompare(vm, cmpGreaterEqual)
+}
+
+func doNumCompare(vm *virtualMachine, op int) error { // doNumCompare pops y then x and pushes the boolean result of the cmp* selector op.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true) // second operand is on top
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       var res bool // stays false for an unknown selector; callers only pass the cmp* constants
+       switch op {
+       case cmpLess:
+               res = x < y
+       case cmpLessEqual:
+               res = x <= y
+       case cmpGreater:
+               res = x > y
+       case cmpGreaterEqual:
+               res = x >= y
+       case cmpEqual:
+               res = x == y
+       case cmpNotEqual:
+               res = x != y
+       }
+       return vm.pushBool(res, true)
+}
+
+func opMin(vm *virtualMachine) error { // OP_MIN: pop two int64s, push the smaller.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if x > y {
+               x = y
+       }
+       return vm.pushInt64(x, true)
+}
+
+func opMax(vm *virtualMachine) error { // OP_MAX: pop two int64s, push the larger.
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       y, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if x < y {
+               x = y
+       }
+       return vm.pushInt64(x, true)
+}
+
+func opWithin(vm *virtualMachine) error { // OP_WITHIN: pop max, min, x; push whether x is in the half-open range [min, max).
+       err := vm.applyCost(4)
+       if err != nil {
+               return err
+       }
+       max, err := vm.popInt64(true) // exclusive upper bound is on top
+       if err != nil {
+               return err
+       }
+       min, err := vm.popInt64(true) // inclusive lower bound
+       if err != nil {
+               return err
+       }
+       x, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(x >= min && x < max, true)
+}
diff --git a/protocol/vm/numeric_test.go b/protocol/vm/numeric_test.go
new file mode 100644 (file)
index 0000000..04b2d85
--- /dev/null
@@ -0,0 +1,565 @@
+package vm
+
+import (
+       "fmt"
+       "math"
+       "testing"
+
+       "chain/testutil"
+)
+
+func TestNumericOps(t *testing.T) { // table-driven test: runs each numeric/comparison opcode and checks the resulting VM state or error.
+       type testStruct struct {
+               op      Op              // opcode under test
+               startVM *virtualMachine // VM state before the op runs (mutated in place)
+               wantErr error           // expected error, or nil for success
+               wantVM  *virtualMachine // expected VM state on success
+       }
+       cases := []testStruct{{
+               op: OP_1ADD,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{{3}},
+               },
+       }, {
+               op: OP_1SUB,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{{1}},
+               },
+       }, {
+               op: OP_2MUL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{{4}},
+               },
+       }, {
+               op: OP_2DIV,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{{1}},
+               },
+       }, {
+               op: OP_2DIV, // negative operand: arithmetic shift rounds toward -inf
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-2)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{Int64Bytes(-1)},
+               },
+       }, {
+               op: OP_2DIV, // -1 >> 1 stays -1
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-1)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{Int64Bytes(-1)},
+               },
+       }, {
+               op: OP_NEGATE,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: 7, // result encoding is 7 bytes longer than the input
+                       dataStack:    [][]byte{Int64Bytes(-2)},
+               },
+       }, {
+               op: OP_ABS,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{{2}},
+               },
+       }, {
+               op: OP_ABS,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-2)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -7,
+                       dataStack:    [][]byte{{2}},
+               },
+       }, {
+               op: OP_NOT,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -1,
+                       dataStack:    [][]byte{{}}, // false encodes as the empty string
+               },
+       }, {
+               op: OP_0NOTEQUAL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:  49998,
+                       dataStack: [][]byte{{1}},
+               },
+       }, {
+               op: OP_ADD,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{3}},
+               },
+       }, {
+               op: OP_SUB,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_MUL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992, // mul/div/mod/shift cost 8, not 2
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{2}},
+               },
+       }, {
+               op: OP_DIV,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{2}},
+               },
+       }, {
+               op: OP_DIV,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-2), {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{Int64Bytes(-2)},
+               },
+       }, {
+               op: OP_DIV,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-2), Int64Bytes(-1)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -23,
+                       dataStack:    [][]byte{{2}},
+               },
+       }, {
+               op: OP_DIV, // truncating division: -3 / 2 == -1
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-3), Int64Bytes(2)},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{Int64Bytes(-1)},
+               },
+       }, {
+               op: OP_DIV, // divisor is empty string == 0
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {}},
+               },
+               wantErr: ErrDivZero,
+       }, {
+               op: OP_MOD,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -10,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_MOD, // mixed signs: result takes the divisor's sign, so -12 mod 10 == 8
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-12), {10}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -16,
+                       dataStack:    [][]byte{{8}},
+               },
+       }, {
+               op: OP_MOD,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {0}},
+               },
+               wantErr: ErrDivZero,
+       }, {
+               op: OP_LSHIFT,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{4}},
+               },
+       }, {
+               op: OP_LSHIFT,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-2), {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{Int64Bytes(-4)},
+               },
+       }, {
+               op: OP_RSHIFT,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_RSHIFT, // arithmetic shift preserves sign: -2 >> 1 == -1
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{Int64Bytes(-2), {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49992,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{Int64Bytes(-1)},
+               },
+       }, {
+               op: OP_BOOLAND,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_BOOLOR,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_NUMEQUAL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -10,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_NUMEQUALVERIFY, // success pushes nothing and leaves the stack empty
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -18,
+                       dataStack:    [][]byte{},
+               },
+       }, {
+               op: OP_NUMEQUALVERIFY,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{1}, {2}},
+               },
+               wantErr: ErrVerifyFailed,
+       }, {
+               op: OP_NUMNOTEQUAL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_LESSTHAN,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -10,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_LESSTHANOREQUAL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -10,
+                       dataStack:    [][]byte{{}},
+               },
+       }, {
+               op: OP_GREATERTHAN,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_GREATERTHANOREQUAL,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_MIN,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_MIN,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{1}, {2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{1}},
+               },
+       }, {
+               op: OP_MAX,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{2}, {1}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{2}},
+               },
+       }, {
+               op: OP_MAX,
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{1}, {2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49998,
+                       deferredCost: -9,
+                       dataStack:    [][]byte{{2}},
+               },
+       }, {
+               op: OP_WITHIN, // stack is x=1, min=1, max=2: 1 is within [1, 2)
+               startVM: &virtualMachine{
+                       runLimit:  50000,
+                       dataStack: [][]byte{{1}, {1}, {2}},
+               },
+               wantVM: &virtualMachine{
+                       runLimit:     49996,
+                       deferredCost: -18,
+                       dataStack:    [][]byte{{1}},
+               },
+       }}
+
+       numops := []Op{ // every numeric opcode also gets a zero-run-limit case appended below
+               OP_1ADD, OP_1SUB, OP_2MUL, OP_2DIV, OP_NEGATE, OP_ABS, OP_NOT, OP_0NOTEQUAL,
+               OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT, OP_BOOLAND,
+               OP_BOOLOR, OP_NUMEQUAL, OP_NUMEQUALVERIFY, OP_NUMNOTEQUAL, OP_LESSTHAN,
+               OP_LESSTHANOREQUAL, OP_GREATERTHAN, OP_GREATERTHANOREQUAL, OP_MIN, OP_MAX, OP_WITHIN,
+       }
+
+       for _, op := range numops {
+               cases = append(cases, testStruct{
+                       op: op,
+                       startVM: &virtualMachine{
+                               runLimit:  0, // exhausted limit: applyCost must fail before any stack work
+                               dataStack: [][]byte{{2}, {2}, {2}},
+                       },
+                       wantErr: ErrRunLimitExceeded,
+               })
+       }
+
+       for i, c := range cases {
+               err := ops[c.op].fn(c.startVM) // invoke the opcode implementation directly; startVM is mutated in place
+
+               if err != c.wantErr {
+                       t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
+                       continue
+               }
+               if c.wantErr != nil {
+                       continue // no state check for expected-error cases
+               }
+
+               if !testutil.DeepEqual(c.startVM, c.wantVM) {
+                       t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, ops[c.op].name, c.startVM, c.wantVM)
+               }
+       }
+}
+
+func TestRangeErrs(t *testing.T) { // verifies ErrRange is produced exactly at int64 overflow boundaries for each arithmetic opcode.
+       cases := []struct {
+               prog           string // assembly source to run
+               expectRangeErr bool   // whether running it must end in ErrRange
+       }{
+               {"0 1ADD", false},
+               {fmt.Sprintf("%d 1ADD", int64(math.MinInt64)), false},
+               {fmt.Sprintf("%d 1ADD", int64(math.MaxInt64)-1), false},
+               {fmt.Sprintf("%d 1ADD", int64(math.MaxInt64)), true},
+               {"0 1SUB", false},
+               {fmt.Sprintf("%d 1SUB", int64(math.MaxInt64)), false},
+               {fmt.Sprintf("%d 1SUB", int64(math.MinInt64)+1), false},
+               {fmt.Sprintf("%d 1SUB", int64(math.MinInt64)), true},
+               {"1 2MUL", false},
+               {fmt.Sprintf("%d 2MUL", int64(math.MaxInt64)/2-1), false},
+               {fmt.Sprintf("%d 2MUL", int64(math.MaxInt64)/2+1), true},
+               {fmt.Sprintf("%d 2MUL", int64(math.MinInt64)/2+1), false},
+               {fmt.Sprintf("%d 2MUL", int64(math.MinInt64)/2-1), true},
+               {"1 NEGATE", false},
+               {"-1 NEGATE", false},
+               {fmt.Sprintf("%d NEGATE", int64(math.MaxInt64)), false},
+               {fmt.Sprintf("%d NEGATE", int64(math.MinInt64)), true}, // -MinInt64 is unrepresentable
+               {"1 ABS", false},
+               {"-1 ABS", false},
+               {fmt.Sprintf("%d ABS", int64(math.MaxInt64)), false},
+               {fmt.Sprintf("%d ABS", int64(math.MinInt64)), true},
+               {"2 3 ADD", false},
+               {fmt.Sprintf("%d %d ADD", int64(math.MinInt64), int64(math.MaxInt64)), false},
+               {fmt.Sprintf("%d %d ADD", int64(math.MaxInt64)/2-1, int64(math.MaxInt64)/2-2), false},
+               {fmt.Sprintf("%d %d ADD", int64(math.MaxInt64)/2+1, int64(math.MaxInt64)/2+2), true},
+               {fmt.Sprintf("%d %d ADD", int64(math.MinInt64)/2+1, int64(math.MinInt64)/2+2), false},
+               {fmt.Sprintf("%d %d ADD", int64(math.MinInt64)/2-1, int64(math.MinInt64)/2-2), true},
+               {"2 3 SUB", false},
+               {fmt.Sprintf("1 %d SUB", int64(math.MaxInt64)), false},
+               {fmt.Sprintf("-1 %d SUB", int64(math.MinInt64)), false},
+               {fmt.Sprintf("1 %d SUB", int64(math.MinInt64)), true},
+               {fmt.Sprintf("-1 %d SUB", int64(math.MaxInt64)), false},
+               {fmt.Sprintf("-2 %d SUB", int64(math.MaxInt64)), true},
+               {"1 2 LSHIFT", false},
+               {"-1 2 LSHIFT", false},
+               {"-1 63 LSHIFT", false},
+               {"-1 64 LSHIFT", true},
+               {"0 64 LSHIFT", false}, // x==0 shortcut: no overflow regardless of shift count
+               {"1 62 LSHIFT", false},
+               {"1 63 LSHIFT", true},
+               {fmt.Sprintf("%d 0 LSHIFT", int64(math.MaxInt64)), false},
+               {fmt.Sprintf("%d 1 LSHIFT", int64(math.MaxInt64)), true},
+               {fmt.Sprintf("%d 1 LSHIFT", int64(math.MaxInt64)/2), false},
+               {fmt.Sprintf("%d 2 LSHIFT", int64(math.MaxInt64)/2), true},
+               {fmt.Sprintf("%d 0 LSHIFT", int64(math.MinInt64)), false},
+               {fmt.Sprintf("%d 1 LSHIFT", int64(math.MinInt64)), true},
+               {fmt.Sprintf("%d 1 LSHIFT", int64(math.MinInt64)/2), false},
+               {fmt.Sprintf("%d 2 LSHIFT", int64(math.MinInt64)/2), true},
+       }
+
+       for i, c := range cases {
+               prog, _ := Assemble(c.prog) // NOTE(review): assembly error ignored; a bad program would surface as a run error below — TODO confirm intended
+               vm := &virtualMachine{
+                       program:  prog,
+                       runLimit: 50000,
+               }
+               err := vm.run()
+               switch err {
+               case nil:
+                       if c.expectRangeErr {
+                               t.Errorf("case %d (%s): expected range error, got none", i, c.prog)
+                       }
+               case ErrRange:
+                       if !c.expectRangeErr {
+                               t.Errorf("case %d (%s): got unexpected range error", i, c.prog)
+                       }
+               default:
+                       if c.expectRangeErr {
+                               t.Errorf("case %d (%s): expected range error, got %s", i, c.prog, err)
+                       } else {
+                               t.Errorf("case %d (%s): got unexpected error %s", i, c.prog, err)
+                       }
+               }
+       }
+}
diff --git a/protocol/vm/ops.go b/protocol/vm/ops.go
new file mode 100644 (file)
index 0000000..13e9e04
--- /dev/null
@@ -0,0 +1,491 @@
+package vm
+
+import (
+       "encoding/binary"
+       "fmt"
+       "math"
+
+       "chain/errors"
+       "chain/math/checked"
+)
+
// Op is a single opcode in the VM instruction set.
type Op uint8

// String returns the mnemonic name of op (e.g. "ADD", "PUSHDATA1") as
// recorded in the ops table; undefined opcodes get an expansion-NOP name.
func (op Op) String() string {
	return ops[op].name
}
+
// Instruction is one parsed opcode together with any data it carries.
type Instruction struct {
	Op   Op     // the opcode itself
	Len  uint32 // total encoded length in bytes, including any length prefix and payload
	Data []byte // associated payload (push data or jump target); nil when none
}
+
const (
	// Small-value pushes. OP_0/OP_FALSE push the empty string;
	// OP_1 (OP_TRUE) through OP_16 push the corresponding integer.
	OP_FALSE Op = 0x00
	OP_0     Op = 0x00 // synonym

	OP_1    Op = 0x51
	OP_TRUE Op = 0x51 // synonym

	OP_2  Op = 0x52
	OP_3  Op = 0x53
	OP_4  Op = 0x54
	OP_5  Op = 0x55
	OP_6  Op = 0x56
	OP_7  Op = 0x57
	OP_8  Op = 0x58
	OP_9  Op = 0x59
	OP_10 Op = 0x5a
	OP_11 Op = 0x5b
	OP_12 Op = 0x5c
	OP_13 Op = 0x5d
	OP_14 Op = 0x5e
	OP_15 Op = 0x5f
	OP_16 Op = 0x60

	// Direct data pushes: OP_DATA_n pushes the n bytes that follow it.
	OP_DATA_1  Op = 0x01
	OP_DATA_2  Op = 0x02
	OP_DATA_3  Op = 0x03
	OP_DATA_4  Op = 0x04
	OP_DATA_5  Op = 0x05
	OP_DATA_6  Op = 0x06
	OP_DATA_7  Op = 0x07
	OP_DATA_8  Op = 0x08
	OP_DATA_9  Op = 0x09
	OP_DATA_10 Op = 0x0a
	OP_DATA_11 Op = 0x0b
	OP_DATA_12 Op = 0x0c
	OP_DATA_13 Op = 0x0d
	OP_DATA_14 Op = 0x0e
	OP_DATA_15 Op = 0x0f
	OP_DATA_16 Op = 0x10
	OP_DATA_17 Op = 0x11
	OP_DATA_18 Op = 0x12
	OP_DATA_19 Op = 0x13
	OP_DATA_20 Op = 0x14
	OP_DATA_21 Op = 0x15
	OP_DATA_22 Op = 0x16
	OP_DATA_23 Op = 0x17
	OP_DATA_24 Op = 0x18
	OP_DATA_25 Op = 0x19
	OP_DATA_26 Op = 0x1a
	OP_DATA_27 Op = 0x1b
	OP_DATA_28 Op = 0x1c
	OP_DATA_29 Op = 0x1d
	OP_DATA_30 Op = 0x1e
	OP_DATA_31 Op = 0x1f
	OP_DATA_32 Op = 0x20
	OP_DATA_33 Op = 0x21
	OP_DATA_34 Op = 0x22
	OP_DATA_35 Op = 0x23
	OP_DATA_36 Op = 0x24
	OP_DATA_37 Op = 0x25
	OP_DATA_38 Op = 0x26
	OP_DATA_39 Op = 0x27
	OP_DATA_40 Op = 0x28
	OP_DATA_41 Op = 0x29
	OP_DATA_42 Op = 0x2a
	OP_DATA_43 Op = 0x2b
	OP_DATA_44 Op = 0x2c
	OP_DATA_45 Op = 0x2d
	OP_DATA_46 Op = 0x2e
	OP_DATA_47 Op = 0x2f
	OP_DATA_48 Op = 0x30
	OP_DATA_49 Op = 0x31
	OP_DATA_50 Op = 0x32
	OP_DATA_51 Op = 0x33
	OP_DATA_52 Op = 0x34
	OP_DATA_53 Op = 0x35
	OP_DATA_54 Op = 0x36
	OP_DATA_55 Op = 0x37
	OP_DATA_56 Op = 0x38
	OP_DATA_57 Op = 0x39
	OP_DATA_58 Op = 0x3a
	OP_DATA_59 Op = 0x3b
	OP_DATA_60 Op = 0x3c
	OP_DATA_61 Op = 0x3d
	OP_DATA_62 Op = 0x3e
	OP_DATA_63 Op = 0x3f
	OP_DATA_64 Op = 0x40
	OP_DATA_65 Op = 0x41
	OP_DATA_66 Op = 0x42
	OP_DATA_67 Op = 0x43
	OP_DATA_68 Op = 0x44
	OP_DATA_69 Op = 0x45
	OP_DATA_70 Op = 0x46
	OP_DATA_71 Op = 0x47
	OP_DATA_72 Op = 0x48
	OP_DATA_73 Op = 0x49
	OP_DATA_74 Op = 0x4a
	OP_DATA_75 Op = 0x4b

	// Length-prefixed pushes (1-, 2-, and 4-byte little-endian length).
	OP_PUSHDATA1 Op = 0x4c
	OP_PUSHDATA2 Op = 0x4d
	OP_PUSHDATA4 Op = 0x4e
	OP_1NEGATE   Op = 0x4f
	OP_NOP       Op = 0x61

	// Control flow.
	OP_JUMP           Op = 0x63
	OP_JUMPIF         Op = 0x64
	OP_VERIFY         Op = 0x69
	OP_FAIL           Op = 0x6a
	OP_CHECKPREDICATE Op = 0xc0

	// Stack manipulation.
	OP_TOALTSTACK   Op = 0x6b
	OP_FROMALTSTACK Op = 0x6c
	OP_2DROP        Op = 0x6d
	OP_2DUP         Op = 0x6e
	OP_3DUP         Op = 0x6f
	OP_2OVER        Op = 0x70
	OP_2ROT         Op = 0x71
	OP_2SWAP        Op = 0x72
	OP_IFDUP        Op = 0x73
	OP_DEPTH        Op = 0x74
	OP_DROP         Op = 0x75
	OP_DUP          Op = 0x76
	OP_NIP          Op = 0x77
	OP_OVER         Op = 0x78
	OP_PICK         Op = 0x79
	OP_ROLL         Op = 0x7a
	OP_ROT          Op = 0x7b
	OP_SWAP         Op = 0x7c
	OP_TUCK         Op = 0x7d

	// String/splice operations.
	OP_CAT         Op = 0x7e
	OP_SUBSTR      Op = 0x7f
	OP_LEFT        Op = 0x80
	OP_RIGHT       Op = 0x81
	OP_SIZE        Op = 0x82
	OP_CATPUSHDATA Op = 0x89

	// Bitwise logic and equality.
	OP_INVERT      Op = 0x83
	OP_AND         Op = 0x84
	OP_OR          Op = 0x85
	OP_XOR         Op = 0x86
	OP_EQUAL       Op = 0x87
	OP_EQUALVERIFY Op = 0x88

	// Numeric operations.
	OP_1ADD               Op = 0x8b
	OP_1SUB               Op = 0x8c
	OP_2MUL               Op = 0x8d
	OP_2DIV               Op = 0x8e
	OP_NEGATE             Op = 0x8f
	OP_ABS                Op = 0x90
	OP_NOT                Op = 0x91
	OP_0NOTEQUAL          Op = 0x92
	OP_ADD                Op = 0x93
	OP_SUB                Op = 0x94
	OP_MUL                Op = 0x95
	OP_DIV                Op = 0x96
	OP_MOD                Op = 0x97
	OP_LSHIFT             Op = 0x98
	OP_RSHIFT             Op = 0x99
	OP_BOOLAND            Op = 0x9a
	OP_BOOLOR             Op = 0x9b
	OP_NUMEQUAL           Op = 0x9c
	OP_NUMEQUALVERIFY     Op = 0x9d
	OP_NUMNOTEQUAL        Op = 0x9e
	OP_LESSTHAN           Op = 0x9f
	OP_GREATERTHAN        Op = 0xa0
	OP_LESSTHANOREQUAL    Op = 0xa1
	OP_GREATERTHANOREQUAL Op = 0xa2
	OP_MIN                Op = 0xa3
	OP_MAX                Op = 0xa4
	OP_WITHIN             Op = 0xa5

	// Crypto operations.
	OP_SHA256        Op = 0xa8
	OP_SHA3          Op = 0xaa
	OP_CHECKSIG      Op = 0xac
	OP_CHECKMULTISIG Op = 0xad
	OP_TXSIGHASH     Op = 0xae
	OP_BLOCKHASH     Op = 0xaf

	// Introspection operations.
	OP_CHECKOUTPUT Op = 0xc1
	OP_ASSET       Op = 0xc2
	OP_AMOUNT      Op = 0xc3
	OP_PROGRAM     Op = 0xc4
	OP_MINTIME     Op = 0xc5
	OP_MAXTIME     Op = 0xc6
	OP_TXDATA      Op = 0xc7
	OP_ENTRYDATA   Op = 0xc8
	OP_INDEX       Op = 0xc9
	OP_ENTRYID     Op = 0xca
	OP_OUTPUTID    Op = 0xcb
	OP_NONCE       Op = 0xcc
	OP_NEXTPROGRAM Op = 0xcd
	OP_BLOCKTIME   Op = 0xce
)
+
// opInfo is one entry of the ops dispatch table: an opcode, its
// assembly mnemonic, and the function that implements it.
type opInfo struct {
	op   Op
	name string
	fn   func(*virtualMachine) error
}
+
var (
	// ops maps every opcode to its name and implementation. Entries
	// absent here are filled in by init: OP_DATA_1..OP_DATA_75 and the
	// small-integer pushes get opPushdata, OP_CHECKPREDICATE is wired
	// up late (see init), and any remaining empty slot becomes an
	// expansion NOP.
	ops = [256]opInfo{
		// data pushing
		OP_FALSE: {OP_FALSE, "FALSE", opFalse},

		// sic: the PUSHDATA ops all share an implementation
		OP_PUSHDATA1: {OP_PUSHDATA1, "PUSHDATA1", opPushdata},
		OP_PUSHDATA2: {OP_PUSHDATA2, "PUSHDATA2", opPushdata},
		OP_PUSHDATA4: {OP_PUSHDATA4, "PUSHDATA4", opPushdata},

		OP_1NEGATE: {OP_1NEGATE, "1NEGATE", op1Negate},

		OP_NOP: {OP_NOP, "NOP", opNop},

		// control flow
		OP_JUMP:   {OP_JUMP, "JUMP", opJump},
		OP_JUMPIF: {OP_JUMPIF, "JUMPIF", opJumpIf},

		OP_VERIFY: {OP_VERIFY, "VERIFY", opVerify},
		OP_FAIL:   {OP_FAIL, "FAIL", opFail},

		OP_TOALTSTACK:   {OP_TOALTSTACK, "TOALTSTACK", opToAltStack},
		OP_FROMALTSTACK: {OP_FROMALTSTACK, "FROMALTSTACK", opFromAltStack},
		OP_2DROP:        {OP_2DROP, "2DROP", op2Drop},
		OP_2DUP:         {OP_2DUP, "2DUP", op2Dup},
		OP_3DUP:         {OP_3DUP, "3DUP", op3Dup},
		OP_2OVER:        {OP_2OVER, "2OVER", op2Over},
		OP_2ROT:         {OP_2ROT, "2ROT", op2Rot},
		OP_2SWAP:        {OP_2SWAP, "2SWAP", op2Swap},
		OP_IFDUP:        {OP_IFDUP, "IFDUP", opIfDup},
		OP_DEPTH:        {OP_DEPTH, "DEPTH", opDepth},
		OP_DROP:         {OP_DROP, "DROP", opDrop},
		OP_DUP:          {OP_DUP, "DUP", opDup},
		OP_NIP:          {OP_NIP, "NIP", opNip},
		OP_OVER:         {OP_OVER, "OVER", opOver},
		OP_PICK:         {OP_PICK, "PICK", opPick},
		OP_ROLL:         {OP_ROLL, "ROLL", opRoll},
		OP_ROT:          {OP_ROT, "ROT", opRot},
		OP_SWAP:         {OP_SWAP, "SWAP", opSwap},
		OP_TUCK:         {OP_TUCK, "TUCK", opTuck},

		OP_CAT:         {OP_CAT, "CAT", opCat},
		OP_SUBSTR:      {OP_SUBSTR, "SUBSTR", opSubstr},
		OP_LEFT:        {OP_LEFT, "LEFT", opLeft},
		OP_RIGHT:       {OP_RIGHT, "RIGHT", opRight},
		OP_SIZE:        {OP_SIZE, "SIZE", opSize},
		OP_CATPUSHDATA: {OP_CATPUSHDATA, "CATPUSHDATA", opCatpushdata},

		OP_INVERT:      {OP_INVERT, "INVERT", opInvert},
		OP_AND:         {OP_AND, "AND", opAnd},
		OP_OR:          {OP_OR, "OR", opOr},
		OP_XOR:         {OP_XOR, "XOR", opXor},
		OP_EQUAL:       {OP_EQUAL, "EQUAL", opEqual},
		OP_EQUALVERIFY: {OP_EQUALVERIFY, "EQUALVERIFY", opEqualVerify},

		OP_1ADD:               {OP_1ADD, "1ADD", op1Add},
		OP_1SUB:               {OP_1SUB, "1SUB", op1Sub},
		OP_2MUL:               {OP_2MUL, "2MUL", op2Mul},
		OP_2DIV:               {OP_2DIV, "2DIV", op2Div},
		OP_NEGATE:             {OP_NEGATE, "NEGATE", opNegate},
		OP_ABS:                {OP_ABS, "ABS", opAbs},
		OP_NOT:                {OP_NOT, "NOT", opNot},
		OP_0NOTEQUAL:          {OP_0NOTEQUAL, "0NOTEQUAL", op0NotEqual},
		OP_ADD:                {OP_ADD, "ADD", opAdd},
		OP_SUB:                {OP_SUB, "SUB", opSub},
		OP_MUL:                {OP_MUL, "MUL", opMul},
		OP_DIV:                {OP_DIV, "DIV", opDiv},
		OP_MOD:                {OP_MOD, "MOD", opMod},
		OP_LSHIFT:             {OP_LSHIFT, "LSHIFT", opLshift},
		OP_RSHIFT:             {OP_RSHIFT, "RSHIFT", opRshift},
		OP_BOOLAND:            {OP_BOOLAND, "BOOLAND", opBoolAnd},
		OP_BOOLOR:             {OP_BOOLOR, "BOOLOR", opBoolOr},
		OP_NUMEQUAL:           {OP_NUMEQUAL, "NUMEQUAL", opNumEqual},
		OP_NUMEQUALVERIFY:     {OP_NUMEQUALVERIFY, "NUMEQUALVERIFY", opNumEqualVerify},
		OP_NUMNOTEQUAL:        {OP_NUMNOTEQUAL, "NUMNOTEQUAL", opNumNotEqual},
		OP_LESSTHAN:           {OP_LESSTHAN, "LESSTHAN", opLessThan},
		OP_GREATERTHAN:        {OP_GREATERTHAN, "GREATERTHAN", opGreaterThan},
		OP_LESSTHANOREQUAL:    {OP_LESSTHANOREQUAL, "LESSTHANOREQUAL", opLessThanOrEqual},
		OP_GREATERTHANOREQUAL: {OP_GREATERTHANOREQUAL, "GREATERTHANOREQUAL", opGreaterThanOrEqual},
		OP_MIN:                {OP_MIN, "MIN", opMin},
		OP_MAX:                {OP_MAX, "MAX", opMax},
		OP_WITHIN:             {OP_WITHIN, "WITHIN", opWithin},

		OP_SHA256:        {OP_SHA256, "SHA256", opSha256},
		OP_SHA3:          {OP_SHA3, "SHA3", opSha3},
		OP_CHECKSIG:      {OP_CHECKSIG, "CHECKSIG", opCheckSig},
		OP_CHECKMULTISIG: {OP_CHECKMULTISIG, "CHECKMULTISIG", opCheckMultiSig},
		OP_TXSIGHASH:     {OP_TXSIGHASH, "TXSIGHASH", opTxSigHash},
		OP_BLOCKHASH:     {OP_BLOCKHASH, "BLOCKHASH", opBlockHash},

		OP_CHECKOUTPUT: {OP_CHECKOUTPUT, "CHECKOUTPUT", opCheckOutput},
		OP_ASSET:       {OP_ASSET, "ASSET", opAsset},
		OP_AMOUNT:      {OP_AMOUNT, "AMOUNT", opAmount},
		OP_PROGRAM:     {OP_PROGRAM, "PROGRAM", opProgram},
		OP_MINTIME:     {OP_MINTIME, "MINTIME", opMinTime},
		OP_MAXTIME:     {OP_MAXTIME, "MAXTIME", opMaxTime},
		OP_TXDATA:      {OP_TXDATA, "TXDATA", opTxData},
		OP_ENTRYDATA:   {OP_ENTRYDATA, "ENTRYDATA", opEntryData},
		OP_INDEX:       {OP_INDEX, "INDEX", opIndex},
		OP_ENTRYID:     {OP_ENTRYID, "ENTRYID", opEntryID},
		OP_OUTPUTID:    {OP_OUTPUTID, "OUTPUTID", opOutputID},
		OP_NONCE:       {OP_NONCE, "NONCE", opNonce},
		OP_NEXTPROGRAM: {OP_NEXTPROGRAM, "NEXTPROGRAM", opNextProgram},
		OP_BLOCKTIME:   {OP_BLOCKTIME, "BLOCKTIME", opBlockTime},
	}

	// opsByName is the reverse lookup from mnemonic to opInfo,
	// populated by init (includes the synonyms "0" and "TRUE").
	opsByName map[string]opInfo
)
+
+// ParseOp parses the op at position pc in prog, returning the parsed
+// instruction (opcode plus any associated data).
+func ParseOp(prog []byte, pc uint32) (inst Instruction, err error) {
+       if len(prog) > math.MaxInt32 {
+               err = ErrLongProgram
+       }
+       l := uint32(len(prog))
+       if pc >= l {
+               err = ErrShortProgram
+               return
+       }
+       opcode := Op(prog[pc])
+       inst.Op = opcode
+       inst.Len = 1
+       if opcode >= OP_1 && opcode <= OP_16 {
+               inst.Data = []byte{uint8(opcode-OP_1) + 1}
+               return
+       }
+       if opcode >= OP_DATA_1 && opcode <= OP_DATA_75 {
+               inst.Len += uint32(opcode - OP_DATA_1 + 1)
+               end, ok := checked.AddUint32(pc, inst.Len)
+               if !ok {
+                       err = errors.WithDetail(checked.ErrOverflow, "data length exceeds max program size")
+                       return
+               }
+               if end > l {
+                       err = ErrShortProgram
+                       return
+               }
+               inst.Data = prog[pc+1 : end]
+               return
+       }
+       if opcode == OP_PUSHDATA1 {
+               if pc == l-1 {
+                       err = ErrShortProgram
+                       return
+               }
+               n := prog[pc+1]
+               inst.Len += uint32(n) + 1
+               end, ok := checked.AddUint32(pc, inst.Len)
+               if !ok {
+                       err = errors.WithDetail(checked.ErrOverflow, "data length exceeds max program size")
+               }
+               if end > l {
+                       err = ErrShortProgram
+                       return
+               }
+               inst.Data = prog[pc+2 : end]
+               return
+       }
+       if opcode == OP_PUSHDATA2 {
+               if len(prog) < 3 || pc > l-3 {
+                       err = ErrShortProgram
+                       return
+               }
+               n := binary.LittleEndian.Uint16(prog[pc+1 : pc+3])
+               inst.Len += uint32(n) + 2
+               end, ok := checked.AddUint32(pc, inst.Len)
+               if !ok {
+                       err = errors.WithDetail(checked.ErrOverflow, "data length exceeds max program size")
+                       return
+               }
+               if end > l {
+                       err = ErrShortProgram
+                       return
+               }
+               inst.Data = prog[pc+3 : end]
+               return
+       }
+       if opcode == OP_PUSHDATA4 {
+               if len(prog) < 5 || pc > l-5 {
+                       err = ErrShortProgram
+                       return
+               }
+               inst.Len += 4
+
+               n := binary.LittleEndian.Uint32(prog[pc+1 : pc+5])
+               var ok bool
+               inst.Len, ok = checked.AddUint32(inst.Len, n)
+               if !ok {
+                       err = errors.WithDetail(checked.ErrOverflow, "data length exceeds max program size")
+                       return
+               }
+               end, ok := checked.AddUint32(pc, inst.Len)
+               if !ok {
+                       err = errors.WithDetail(checked.ErrOverflow, "data length exceeds max program size")
+                       return
+               }
+               if end > l {
+                       err = ErrShortProgram
+                       return
+               }
+               inst.Data = prog[pc+5 : end]
+               return
+       }
+       if opcode == OP_JUMP || opcode == OP_JUMPIF {
+               inst.Len += 4
+               end, ok := checked.AddUint32(pc, inst.Len)
+               if !ok {
+                       err = errors.WithDetail(checked.ErrOverflow, "jump target exceeds max program size")
+                       return
+               }
+               if end > l {
+                       err = ErrShortProgram
+                       return
+               }
+               inst.Data = prog[pc+1 : end]
+               return
+       }
+       return
+}
+
+func ParseProgram(prog []byte) ([]Instruction, error) {
+       var result []Instruction
+       for pc := uint32(0); pc < uint32(len(prog)); { // update pc inside the loop
+               inst, err := ParseOp(prog, pc)
+               if err != nil {
+                       return nil, err
+               }
+               result = append(result, inst)
+               var ok bool
+               pc, ok = checked.AddUint32(pc, inst.Len)
+               if !ok {
+                       return nil, errors.WithDetail(checked.ErrOverflow, "program counter exceeds max program size")
+               }
+       }
+       return result, nil
+}
+
// isExpansion marks the opcodes that are undefined "expansion" NOPs;
// it is populated by init.
var isExpansion [256]bool
+
// init finishes populating the ops table and its derived indexes.
func init() {
	// All 75 direct data-push opcodes share the opPushdata implementation.
	for i := 1; i <= 75; i++ {
		ops[i] = opInfo{Op(i), fmt.Sprintf("DATA_%d", i), opPushdata}
	}
	// Small-integer opcodes OP_1..OP_16 are named "1".."16".
	for i := uint8(0); i <= 15; i++ {
		op := uint8(OP_1) + i
		ops[op] = opInfo{Op(op), fmt.Sprintf("%d", i+1), opPushdata}
	}

	// This is here to break a dependency cycle
	ops[OP_CHECKPREDICATE] = opInfo{OP_CHECKPREDICATE, "CHECKPREDICATE", opCheckPredicate}

	opsByName = make(map[string]opInfo)
	for _, info := range ops {
		opsByName[info.name] = info
	}
	// Assembly-language synonyms.
	opsByName["0"] = ops[OP_FALSE]
	opsByName["TRUE"] = ops[OP_1]

	// Any opcode still unnamed is an undefined "expansion" opcode,
	// which executes as a NOP.
	for i := 0; i <= 255; i++ {
		if ops[i].name == "" {
			ops[i] = opInfo{Op(i), fmt.Sprintf("NOPx%02x", i), opNop}
			isExpansion[i] = true
		}
	}
}
diff --git a/protocol/vm/ops_test.go b/protocol/vm/ops_test.go
new file mode 100644 (file)
index 0000000..50f7827
--- /dev/null
@@ -0,0 +1,140 @@
+package vm
+
+import (
+       "fmt"
+       "testing"
+
+       "chain/errors"
+       "chain/math/checked"
+       "chain/testutil"
+)
+
// TestParseOp exercises ParseOp across each opcode family: bare ops,
// small-integer pushes, direct and length-prefixed data pushes,
// truncated programs, and a length-prefix overflow.
func TestParseOp(t *testing.T) {
	cases := []struct {
		prog    []byte
		pc      uint32
		want    Instruction
		wantErr error
	}{{
		prog: []byte{byte(OP_ADD)},
		want: Instruction{Op: OP_ADD, Len: 1},
	}, {
		prog: []byte{byte(OP_16)},
		want: Instruction{Op: OP_16, Data: []byte{16}, Len: 1},
	}, {
		prog: []byte{byte(OP_DATA_5), 1, 1, 1, 1, 1},
		want: Instruction{Op: OP_DATA_5, Data: []byte{1, 1, 1, 1, 1}, Len: 6},
	}, {
		// Trailing bytes past the instruction are ignored.
		prog: []byte{byte(OP_DATA_5), 1, 1, 1, 1, 1, 255},
		want: Instruction{Op: OP_DATA_5, Data: []byte{1, 1, 1, 1, 1}, Len: 6},
	}, {
		prog: []byte{byte(OP_PUSHDATA1), 1, 1},
		want: Instruction{Op: OP_PUSHDATA1, Data: []byte{1}, Len: 3},
	}, {
		prog: []byte{byte(OP_PUSHDATA1), 1, 1, 255},
		want: Instruction{Op: OP_PUSHDATA1, Data: []byte{1}, Len: 3},
	}, {
		prog: []byte{byte(OP_PUSHDATA2), 1, 0, 1},
		want: Instruction{Op: OP_PUSHDATA2, Data: []byte{1}, Len: 4},
	}, {
		prog: []byte{byte(OP_PUSHDATA2), 1, 0, 1, 255},
		want: Instruction{Op: OP_PUSHDATA2, Data: []byte{1}, Len: 4},
	}, {
		prog: []byte{byte(OP_PUSHDATA4), 1, 0, 0, 0, 1},
		want: Instruction{Op: OP_PUSHDATA4, Data: []byte{1}, Len: 6},
	}, {
		prog: []byte{byte(OP_PUSHDATA4), 1, 0, 0, 0, 1, 255},
		want: Instruction{Op: OP_PUSHDATA4, Data: []byte{1}, Len: 6},
	}, {
		// Empty or truncated programs must report ErrShortProgram.
		prog:    []byte{},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_0)},
		pc:      1,
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_DATA_1)},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_PUSHDATA1)},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_PUSHDATA1), 1},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_PUSHDATA2)},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_PUSHDATA2), 1, 0},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_PUSHDATA4)},
		wantErr: ErrShortProgram,
	}, {
		prog:    []byte{byte(OP_PUSHDATA4), 1, 0, 0, 0},
		wantErr: ErrShortProgram,
	}, {
		// PUSHDATA4 at pc=71 declares a 0xffffffff-byte payload,
		// overflowing the uint32 length arithmetic.
		pc:      71,
		prog:    []byte{0x6d, 0x6b, 0xaa, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x0, 0x0, 0x4e, 0xff, 0xff, 0xff, 0xff, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30},
		wantErr: checked.ErrOverflow,
	}}

	for _, c := range cases {
		t.Run(fmt.Sprintf("%d: %x", c.pc, c.prog), func(t *testing.T) {
			got, gotErr := ParseOp(c.prog, c.pc)

			if errors.Root(gotErr) != c.wantErr {
				t.Errorf("ParseOp(%x, %d) error = %v want %v", c.prog, c.pc, gotErr, c.wantErr)
			}

			if c.wantErr != nil {
				return
			}

			if !testutil.DeepEqual(got, c.want) {
				t.Errorf("ParseOp(%x, %d) = %+v want %+v", c.prog, c.pc, got, c.want)
			}
		})
	}
}
+
+func TestParseProgram(t *testing.T) {
+       cases := []struct {
+               prog    []byte
+               want    []Instruction
+               wantErr error
+       }{
+               {
+                       prog: []byte{byte(OP_2), byte(OP_3), byte(OP_ADD), byte(OP_5), byte(OP_NUMEQUAL)},
+                       want: []Instruction{
+                               {Op: OP_2, Data: []byte{0x02}, Len: 1},
+                               {Op: OP_3, Data: []byte{0x03}, Len: 1},
+                               {Op: OP_ADD, Len: 1},
+                               {Op: OP_5, Data: []byte{0x05}, Len: 1},
+                               {Op: OP_NUMEQUAL, Len: 1},
+                       },
+               },
+               {
+                       prog: []byte{255},
+                       want: []Instruction{
+                               {Op: 255, Len: 1},
+                       },
+               },
+       }
+
+       for _, c := range cases {
+               got, gotErr := ParseProgram(c.prog)
+
+               if errors.Root(gotErr) != c.wantErr {
+                       t.Errorf("ParseProgram(%x) error = %v want %v", c.prog, gotErr, c.wantErr)
+               }
+
+               if c.wantErr != nil {
+                       continue
+               }
+
+               if !testutil.DeepEqual(got, c.want) {
+                       t.Errorf("ParseProgram(%x) = %+v want %+v", c.prog, got, c.want)
+               }
+       }
+}
diff --git a/protocol/vm/pushdata.go b/protocol/vm/pushdata.go
new file mode 100644 (file)
index 0000000..0edf329
--- /dev/null
@@ -0,0 +1,64 @@
+package vm
+
+import "encoding/binary"
+
+func opFalse(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       return vm.pushBool(false, false)
+}
+
+func opPushdata(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       d := make([]byte, len(vm.data))
+       copy(d, vm.data)
+       return vm.push(d, false)
+}
+
+func op1Negate(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       return vm.pushInt64(-1, false)
+}
+
// opNop does nothing beyond charging the base instruction cost.
func opNop(vm *virtualMachine) error {
	return vm.applyCost(1)
}
+
+func PushdataBytes(in []byte) []byte {
+       l := len(in)
+       if l == 0 {
+               return []byte{byte(OP_0)}
+       }
+       if l <= 75 {
+               return append([]byte{byte(OP_DATA_1) + uint8(l) - 1}, in...)
+       }
+       if l < 1<<8 {
+               return append([]byte{byte(OP_PUSHDATA1), uint8(l)}, in...)
+       }
+       if l < 1<<16 {
+               var b [2]byte
+               binary.LittleEndian.PutUint16(b[:], uint16(l))
+               return append([]byte{byte(OP_PUSHDATA2), b[0], b[1]}, in...)
+       }
+       var b [4]byte
+       binary.LittleEndian.PutUint32(b[:], uint32(l))
+       return append([]byte{byte(OP_PUSHDATA4), b[0], b[1], b[2], b[3]}, in...)
+}
+
+func PushdataInt64(n int64) []byte {
+       if n == 0 {
+               return []byte{byte(OP_0)}
+       }
+       if n >= 1 && n <= 16 {
+               return []byte{uint8(OP_1) + uint8(n) - 1}
+       }
+       return PushdataBytes(Int64Bytes(n))
+}
diff --git a/protocol/vm/pushdata_test.go b/protocol/vm/pushdata_test.go
new file mode 100644 (file)
index 0000000..1f5a034
--- /dev/null
@@ -0,0 +1,188 @@
+package vm
+
+import (
+       "bytes"
+       "testing"
+
+       "chain/testutil"
+)
+
// TestPushdataOps runs each push-style opcode implementation directly
// against a prepared VM and checks the resulting stack and run limit,
// plus run-limit-exhaustion failures.
func TestPushdataOps(t *testing.T) {
	type testStruct struct {
		op      Op
		startVM *virtualMachine
		wantErr error
		wantVM  *virtualMachine
	}
	cases := []testStruct{{
		op: OP_FALSE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{},
		},
		wantVM: &virtualMachine{
			runLimit:  49991,
			dataStack: [][]byte{{}},
		},
	}, {
		op: OP_FALSE,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_1NEGATE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{},
		},
		wantVM: &virtualMachine{
			runLimit:  49983,
			dataStack: [][]byte{Int64Bytes(-1)},
		},
	}, {
		op: OP_1NEGATE,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{},
		},
		wantErr: ErrRunLimitExceeded,
	}}

	// All length-prefixed and direct data pushes share opPushdata, so
	// generate a success case and a run-limit failure case for each.
	pushdataops := []Op{OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4}
	for i := 1; i <= 75; i++ {
		pushdataops = append(pushdataops, Op(i))
	}
	for _, op := range pushdataops {
		cases = append(cases, testStruct{
			op: op,
			startVM: &virtualMachine{
				runLimit:  50000,
				dataStack: [][]byte{},
				data:      []byte("data"),
			},
			wantVM: &virtualMachine{
				runLimit:  49987,
				dataStack: [][]byte{[]byte("data")},
				data:      []byte("data"),
			},
		}, testStruct{
			op: op,
			startVM: &virtualMachine{
				runLimit:  1,
				dataStack: [][]byte{},
				data:      []byte("data"),
			},
			wantErr: ErrRunLimitExceeded,
		})
	}

	// Every push op (and NOP) must fail cleanly with a zero run limit.
	pushops := append(pushdataops, OP_FALSE, OP_1NEGATE, OP_NOP)
	for _, op := range pushops {
		cases = append(cases, testStruct{
			op: op,
			startVM: &virtualMachine{
				runLimit:  0,
				dataStack: [][]byte{},
			},
			wantErr: ErrRunLimitExceeded,
		})
	}

	for i, c := range cases {
		err := ops[c.op].fn(c.startVM)

		if err != c.wantErr {
			t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			continue
		}

		// On success the op mutates startVM in place; compare against
		// the expected final VM state.
		if !testutil.DeepEqual(c.startVM, c.wantVM) {
			t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, ops[c.op].name, c.startVM, c.wantVM)
		}
	}
}
+
// TestPushDataBytes checks that PushdataBytes picks the minimal
// encoding at each size boundary (0, 1..75, 255, 2^8, 2^16).
func TestPushDataBytes(t *testing.T) {
	type test struct {
		data []byte
		want []byte
	}
	cases := []test{{
		data: nil,
		want: []byte{byte(OP_0)},
	}, {
		data: make([]byte, 255),
		want: append([]byte{byte(OP_PUSHDATA1), 0xff}, make([]byte, 255)...),
	}, {
		data: make([]byte, 1<<8),
		want: append([]byte{byte(OP_PUSHDATA2), 0, 1}, make([]byte, 1<<8)...),
	}, {
		data: make([]byte, 1<<16),
		want: append([]byte{byte(OP_PUSHDATA4), 0, 0, 1, 0}, make([]byte, 1<<16)...),
	}}

	// Sizes 1..75 use the direct OP_DATA_n opcodes.
	for i := 1; i <= 75; i++ {
		cases = append(cases, test{
			data: make([]byte, i),
			want: append([]byte{byte(OP_DATA_1) - 1 + byte(i)}, make([]byte, i)...),
		})
	}

	for _, c := range cases {
		got := PushdataBytes(c.data)

		// Truncate error output to the first 10 bytes to keep failure
		// messages readable for the large fixtures.
		dl := len(c.data)
		if dl > 10 {
			dl = 10
		}
		if !bytes.Equal(got, c.want) {
			t.Errorf("PushdataBytes(%x...) = %x...[%d] want %x...[%d]", c.data[:dl], got[:dl], len(got), c.want[:dl], len(c.want))
		}
	}
}
+
+func TestPushdataInt64(t *testing.T) {
+       type test struct {
+               num  int64
+               want []byte
+       }
+       cases := []test{{
+               num:  0,
+               want: []byte{byte(OP_0)},
+       }, {
+               num:  17,
+               want: []byte{byte(OP_DATA_1), 0x11},
+       }, {
+               num:  255,
+               want: []byte{byte(OP_DATA_1), 0xff},
+       }, {
+               num:  256,
+               want: []byte{byte(OP_DATA_2), 0x00, 0x01},
+       }, {
+               num:  -1,
+               want: []byte{byte(OP_DATA_8), 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+       }, {
+               num:  -2,
+               want: []byte{byte(OP_DATA_8), 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+       }}
+
+       for i := 1; i <= 16; i++ {
+               cases = append(cases, test{
+                       num:  int64(i),
+                       want: []byte{byte(OP_1) - 1 + byte(i)},
+               })
+       }
+
+       for _, c := range cases {
+               got := PushdataInt64(c.num)
+
+               if !bytes.Equal(got, c.want) {
+                       t.Errorf("PushdataInt64(%d) = %x want %x", c.num, got, c.want)
+               }
+       }
+}
diff --git a/protocol/vm/splice.go b/protocol/vm/splice.go
new file mode 100644 (file)
index 0000000..976acdb
--- /dev/null
@@ -0,0 +1,170 @@
+package vm
+
+import "chain/math/checked"
+
+func opCat(vm *virtualMachine) error {
+       err := vm.applyCost(4)
+       if err != nil {
+               return err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       lens := int64(len(a) + len(b))
+       err = vm.applyCost(lens)
+       if err != nil {
+               return err
+       }
+       vm.deferCost(-lens)
+       err = vm.push(append(a, b...), true)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opSubstr(vm *virtualMachine) error {
+       err := vm.applyCost(4)
+       if err != nil {
+               return err
+       }
+       size, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if size < 0 {
+               return ErrBadValue
+       }
+       err = vm.applyCost(size)
+       if err != nil {
+               return err
+       }
+       vm.deferCost(-size)
+       offset, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if offset < 0 {
+               return ErrBadValue
+       }
+       str, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       end, ok := checked.AddInt64(offset, size)
+       if !ok || end > int64(len(str)) {
+               return ErrBadValue
+       }
+       err = vm.push(str[offset:end], true)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opLeft(vm *virtualMachine) error {
+       err := vm.applyCost(4)
+       if err != nil {
+               return err
+       }
+       size, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if size < 0 {
+               return ErrBadValue
+       }
+       err = vm.applyCost(size)
+       if err != nil {
+               return err
+       }
+       vm.deferCost(-size)
+       str, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       if size > int64(len(str)) {
+               return ErrBadValue
+       }
+       err = vm.push(str[:size], true)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opRight(vm *virtualMachine) error {
+       err := vm.applyCost(4)
+       if err != nil {
+               return err
+       }
+       size, err := vm.popInt64(true)
+       if err != nil {
+               return err
+       }
+       if size < 0 {
+               return ErrBadValue
+       }
+       err = vm.applyCost(size)
+       if err != nil {
+               return err
+       }
+       vm.deferCost(-size)
+       str, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       lstr := int64(len(str))
+       if size > lstr {
+               return ErrBadValue
+       }
+       err = vm.push(str[lstr-size:], true)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opSize(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       str, err := vm.top()
+       if err != nil {
+               return err
+       }
+       err = vm.pushInt64(int64(len(str)), true)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opCatpushdata(vm *virtualMachine) error {
+       err := vm.applyCost(4)
+       if err != nil {
+               return err
+       }
+       b, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       a, err := vm.pop(true)
+       if err != nil {
+               return err
+       }
+       lb := len(b)
+       lens := int64(len(a) + lb)
+       err = vm.applyCost(lens)
+       if err != nil {
+               return err
+       }
+       vm.deferCost(-lens)
+       return vm.push(append(a, PushdataBytes(b)...), true)
+}
diff --git a/protocol/vm/splice_test.go b/protocol/vm/splice_test.go
new file mode 100644 (file)
index 0000000..5758be1
--- /dev/null
@@ -0,0 +1,192 @@
+package vm
+
+import (
+       "testing"
+
+       "chain/testutil"
+)
+
// TestSpliceOps table-tests the splice opcodes (OP_CAT, OP_SUBSTR,
// OP_LEFT, OP_RIGHT, OP_SIZE, OP_CATPUSHDATA). Each case runs one
// opcode against a starting VM and checks either the exact resulting
// VM state (run limit, deferred cost, data stack) or the expected
// error, including run-limit exhaustion for every opcode.
func TestSpliceOps(t *testing.T) {
	type testStruct struct {
		op      Op
		startVM *virtualMachine
		wantErr error
		wantVM  *virtualMachine
	}
	cases := []testStruct{{
		op: OP_CAT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("hello"), []byte("world")},
		},
		wantVM: &virtualMachine{
			runLimit:     49986,
			deferredCost: -18,
			dataStack:    [][]byte{[]byte("helloworld")},
		},
	}, {
		op: OP_CAT,
		startVM: &virtualMachine{
			runLimit:  4,
			dataStack: [][]byte{[]byte("hello"), []byte("world")},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_SUBSTR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {3}, {5}},
		},
		wantVM: &virtualMachine{
			runLimit:     49991,
			deferredCost: -28,
			dataStack:    [][]byte{[]byte("lowor")},
		},
	}, {
		op: OP_SUBSTR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {3}, Int64Bytes(-1)},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_SUBSTR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), Int64Bytes(-1), {5}},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_SUBSTR,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {6}, {5}},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_SUBSTR,
		startVM: &virtualMachine{
			runLimit:  4,
			dataStack: [][]byte{[]byte("helloworld"), {3}, {5}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_LEFT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {5}},
		},
		wantVM: &virtualMachine{
			runLimit:     49991,
			deferredCost: -19,
			dataStack:    [][]byte{[]byte("hello")},
		},
	}, {
		op: OP_LEFT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), Int64Bytes(-1)},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_LEFT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {11}},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_LEFT,
		startVM: &virtualMachine{
			runLimit:  4,
			dataStack: [][]byte{[]byte("helloworld"), {5}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_RIGHT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {5}},
		},
		wantVM: &virtualMachine{
			runLimit:     49991,
			deferredCost: -19,
			dataStack:    [][]byte{[]byte("world")},
		},
	}, {
		op: OP_RIGHT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), Int64Bytes(-1)},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_RIGHT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld"), {11}},
		},
		wantErr: ErrBadValue,
	}, {
		op: OP_RIGHT,
		startVM: &virtualMachine{
			runLimit:  4,
			dataStack: [][]byte{[]byte("helloworld"), {5}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_SIZE,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{[]byte("helloworld")},
		},
		wantVM: &virtualMachine{
			runLimit:     49999,
			deferredCost: 9,
			dataStack:    [][]byte{[]byte("helloworld"), {10}},
		},
	}, {
		op: OP_CATPUSHDATA,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{0xff}, {0xab, 0xcd}},
		},
		wantVM: &virtualMachine{
			runLimit:     49993,
			deferredCost: -10,
			dataStack:    [][]byte{{0xff, 0x02, 0xab, 0xcd}},
		},
	}, {
		op: OP_CATPUSHDATA,
		startVM: &virtualMachine{
			runLimit:  4,
			dataStack: [][]byte{{0xff}, {0xab, 0xcd}},
		},
		wantErr: ErrRunLimitExceeded,
	}}

	// Every splice opcode must fail cleanly when the run limit is
	// already exhausted.
	spliceops := []Op{OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_CATPUSHDATA, OP_SIZE}
	for _, op := range spliceops {
		cases = append(cases, testStruct{
			op:      op,
			startVM: &virtualMachine{runLimit: 0},
			wantErr: ErrRunLimitExceeded,
		})
	}

	for i, c := range cases {
		err := ops[c.op].fn(c.startVM)

		if err != c.wantErr {
			t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			continue
		}

		if !testutil.DeepEqual(c.startVM, c.wantVM) {
			t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, ops[c.op].name, c.startVM, c.wantVM)
		}
	}
}
diff --git a/protocol/vm/stack.go b/protocol/vm/stack.go
new file mode 100644 (file)
index 0000000..47e2ba8
--- /dev/null
@@ -0,0 +1,312 @@
+package vm
+
+import "chain/math/checked"
+
+func opToAltStack(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       if len(vm.dataStack) == 0 {
+               return ErrDataStackUnderflow
+       }
+       // no standard memory cost accounting here
+       vm.altStack = append(vm.altStack, vm.dataStack[len(vm.dataStack)-1])
+       vm.dataStack = vm.dataStack[:len(vm.dataStack)-1]
+       return nil
+}
+
+func opFromAltStack(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       if len(vm.altStack) == 0 {
+               return ErrAltStackUnderflow
+       }
+       // no standard memory cost accounting here
+       vm.dataStack = append(vm.dataStack, vm.altStack[len(vm.altStack)-1])
+       vm.altStack = vm.altStack[:len(vm.altStack)-1]
+       return nil
+}
+
+func op2Drop(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       for i := 0; i < 2; i++ {
+               _, err = vm.pop(false)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
// op2Dup duplicates the top two items of the data stack.
func op2Dup(vm *virtualMachine) error {
	return nDup(vm, 2)
}
+
// op3Dup duplicates the top three items of the data stack.
func op3Dup(vm *virtualMachine) error {
	return nDup(vm, 3)
}
+
+func nDup(vm *virtualMachine, n int) error {
+       err := vm.applyCost(int64(n))
+       if err != nil {
+               return err
+       }
+       if len(vm.dataStack) < n {
+               return ErrDataStackUnderflow
+       }
+       for i := 0; i < n; i++ {
+               err = vm.push(vm.dataStack[len(vm.dataStack)-n], false)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func op2Over(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       if len(vm.dataStack) < 4 {
+               return ErrDataStackUnderflow
+       }
+       for i := 0; i < 2; i++ {
+               err = vm.push(vm.dataStack[len(vm.dataStack)-4], false)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func op2Rot(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       if len(vm.dataStack) < 6 {
+               return ErrDataStackUnderflow
+       }
+       newStack := make([][]byte, 0, len(vm.dataStack))
+       newStack = append(newStack, vm.dataStack[:len(vm.dataStack)-6]...)
+       newStack = append(newStack, vm.dataStack[len(vm.dataStack)-4:]...)
+       newStack = append(newStack, vm.dataStack[len(vm.dataStack)-6])
+       newStack = append(newStack, vm.dataStack[len(vm.dataStack)-5])
+       vm.dataStack = newStack
+       return nil
+}
+
+func op2Swap(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       if len(vm.dataStack) < 4 {
+               return ErrDataStackUnderflow
+       }
+       newStack := make([][]byte, 0, len(vm.dataStack))
+       newStack = append(newStack, vm.dataStack[:len(vm.dataStack)-4]...)
+       newStack = append(newStack, vm.dataStack[len(vm.dataStack)-2:]...)
+       newStack = append(newStack, vm.dataStack[len(vm.dataStack)-4])
+       newStack = append(newStack, vm.dataStack[len(vm.dataStack)-3])
+       vm.dataStack = newStack
+       return nil
+}
+
+func opIfDup(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       item, err := vm.top()
+       if err != nil {
+               return err
+       }
+       if AsBool(item) {
+               err = vm.push(item, false)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func opDepth(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       err = vm.pushInt64(int64(len(vm.dataStack)), false)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opDrop(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       _, err = vm.pop(false)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
// opDup duplicates the top item of the data stack.
func opDup(vm *virtualMachine) error {
	return nDup(vm, 1)
}
+
// opNip removes the second-from-top item of the data stack, leaving
// the top item in place (x1 x2 -> x2).
func opNip(vm *virtualMachine) error {
	err := vm.applyCost(1)
	if err != nil {
		return err
	}
	top, err := vm.top()
	if err != nil {
		return err
	}
	// temporarily pop off the top value with no standard memory accounting
	vm.dataStack = vm.dataStack[:len(vm.dataStack)-1]
	// pop the item underneath with the usual accounting
	_, err = vm.pop(false)
	if err != nil {
		return err
	}
	// now put the top item back
	vm.dataStack = append(vm.dataStack, top)
	return nil
}
+
+func opOver(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       if len(vm.dataStack) < 2 {
+               return ErrDataStackUnderflow
+       }
+       err = vm.push(vm.dataStack[len(vm.dataStack)-2], false)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opPick(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(false)
+       if err != nil {
+               return err
+       }
+       if n < 0 {
+               return ErrBadValue
+       }
+       off, ok := checked.AddInt64(n, 1)
+       if !ok {
+               return ErrBadValue
+       }
+       if int64(len(vm.dataStack)) < off {
+               return ErrDataStackUnderflow
+       }
+       err = vm.push(vm.dataStack[int64(len(vm.dataStack))-(off)], false)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opRoll(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       n, err := vm.popInt64(false)
+       if err != nil {
+               return err
+       }
+       if n < 0 {
+               return ErrBadValue
+       }
+       off, ok := checked.AddInt64(n, 1)
+       if !ok {
+               return ErrBadValue
+       }
+       err = rot(vm, off)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func opRot(vm *virtualMachine) error {
+       err := vm.applyCost(2)
+       if err != nil {
+               return err
+       }
+       err = rot(vm, 3)
+       if err != nil {
+               return err
+       }
+       return nil
+}
+
+func rot(vm *virtualMachine, n int64) error {
+       if n < 1 {
+               return ErrBadValue
+       }
+       if int64(len(vm.dataStack)) < n {
+               return ErrDataStackUnderflow
+       }
+       index := int64(len(vm.dataStack)) - n
+       newStack := make([][]byte, 0, len(vm.dataStack))
+       newStack = append(newStack, vm.dataStack[:index]...)
+       newStack = append(newStack, vm.dataStack[index+1:]...)
+       newStack = append(newStack, vm.dataStack[index])
+       vm.dataStack = newStack
+       return nil
+}
+
+func opSwap(vm *virtualMachine) error {
+       err := vm.applyCost(1)
+       if err != nil {
+               return err
+       }
+       l := len(vm.dataStack)
+       if l < 2 {
+               return ErrDataStackUnderflow
+       }
+       vm.dataStack[l-1], vm.dataStack[l-2] = vm.dataStack[l-2], vm.dataStack[l-1]
+       return nil
+}
+
// opTuck inserts a copy of the top item beneath the second-from-top
// item (x1 x2 -> x2 x1 x2).
func opTuck(vm *virtualMachine) error {
	err := vm.applyCost(1)
	if err != nil {
		return err
	}
	if len(vm.dataStack) < 2 {
		return ErrDataStackUnderflow
	}
	top2 := make([][]byte, 2)
	copy(top2, vm.dataStack[len(vm.dataStack)-2:])
	// temporarily remove the top two items without standard memory accounting
	vm.dataStack = vm.dataStack[:len(vm.dataStack)-2]
	// push the copy of the old top item; this push pays the memory cost
	err = vm.push(top2[1], false)
	if err != nil {
		return err
	}
	// restore the original two items above the inserted copy
	vm.dataStack = append(vm.dataStack, top2...)
	return nil
}
diff --git a/protocol/vm/stack_test.go b/protocol/vm/stack_test.go
new file mode 100644 (file)
index 0000000..148efb1
--- /dev/null
@@ -0,0 +1,423 @@
+package vm
+
+import (
+       "fmt"
+       "reflect"
+       "runtime"
+       "strings"
+       "testing"
+
+       "chain/testutil"
+)
+
// TestStackOps table-tests the stack-manipulation opcodes. Each case
// runs one opcode against a starting VM and checks either the exact
// resulting VM state (run limit, data stack, alt stack) or the
// expected error, including run-limit exhaustion for every opcode.
func TestStackOps(t *testing.T) {
	type testStruct struct {
		op      Op
		startVM *virtualMachine
		wantErr error
		wantVM  *virtualMachine
	}

	cases := []testStruct{{
		op: OP_TOALTSTACK,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			dataStack: [][]byte{},
			altStack:  [][]byte{{1}},
		},
	}, {
		op: OP_FROMALTSTACK,
		startVM: &virtualMachine{
			runLimit: 50000,
			altStack: [][]byte{{1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			altStack:  [][]byte{},
			dataStack: [][]byte{{1}},
		},
	}, {
		op: OP_FROMALTSTACK,
		startVM: &virtualMachine{
			runLimit: 50000,
			altStack: [][]byte{},
		},
		wantErr: ErrAltStackUnderflow,
	}, {
		op: OP_2DROP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{1}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  50016,
			dataStack: [][]byte{},
		},
	}, {
		op: OP_2DUP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49980,
			dataStack: [][]byte{{2}, {1}, {2}, {1}},
		},
	}, {
		op: OP_3DUP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{3}, {2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49970,
			dataStack: [][]byte{{3}, {2}, {1}, {3}, {2}, {1}},
		},
	}, {
		op: OP_2OVER,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{4}, {3}, {2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49980,
			dataStack: [][]byte{{4}, {3}, {2}, {1}, {4}, {3}},
		},
	}, {
		op: OP_2OVER,
		startVM: &virtualMachine{
			runLimit:  2,
			dataStack: [][]byte{{4}, {3}, {2}, {1}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_2ROT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{6}, {5}, {4}, {3}, {2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			dataStack: [][]byte{{4}, {3}, {2}, {1}, {6}, {5}},
		},
	}, {
		op: OP_2SWAP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{4}, {3}, {2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			dataStack: [][]byte{{2}, {1}, {4}, {3}},
		},
	}, {
		op: OP_IFDUP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49990,
			dataStack: [][]byte{{1}, {1}},
		},
	}, {
		op: OP_IFDUP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{}},
		},
		wantVM: &virtualMachine{
			runLimit:  49999,
			dataStack: [][]byte{{}},
		},
	}, {
		op: OP_IFDUP,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{{1}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_DEPTH,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49990,
			dataStack: [][]byte{{1}, {1}},
		},
	}, {
		op: OP_DEPTH,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{{1}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_DROP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{1}},
		},
		wantVM: &virtualMachine{
			runLimit:  50008,
			dataStack: [][]byte{},
		},
	}, {
		op: OP_DUP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49990,
			dataStack: [][]byte{{1}, {1}},
		},
	}, {
		op: OP_DUP,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{{1}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_NIP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  50008,
			dataStack: [][]byte{{1}},
		},
	}, {
		op: OP_OVER,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49990,
			dataStack: [][]byte{{2}, {1}, {2}},
		},
	}, {
		op: OP_OVER,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{{2}, {1}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_PICK,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{3}, {2}, {1}, {2}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			dataStack: [][]byte{{3}, {2}, {1}, {3}},
		},
	}, {
		op: OP_PICK,
		startVM: &virtualMachine{
			runLimit:  2,
			dataStack: [][]byte{{0xff, 0xff}, {2}, {1}, {2}},
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		op: OP_ROLL,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{3}, {2}, {1}, {2}},
		},
		wantVM: &virtualMachine{
			runLimit:  50007,
			dataStack: [][]byte{{2}, {1}, {3}},
		},
	}, {
		op: OP_ROT,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{3}, {2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49998,
			dataStack: [][]byte{{2}, {1}, {3}},
		},
	}, {
		op: OP_SWAP,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49999,
			dataStack: [][]byte{{1}, {2}},
		},
	}, {
		op: OP_TUCK,
		startVM: &virtualMachine{
			runLimit:  50000,
			dataStack: [][]byte{{2}, {1}},
		},
		wantVM: &virtualMachine{
			runLimit:  49990,
			dataStack: [][]byte{{1}, {2}, {1}},
		},
	}, {
		op: OP_TUCK,
		startVM: &virtualMachine{
			runLimit:  1,
			dataStack: [][]byte{{2}, {1}},
		},
		wantErr: ErrRunLimitExceeded,
	}}
	// Every stack opcode must fail cleanly when the run limit is
	// already exhausted.
	stackops := []Op{
		OP_DEPTH, OP_FROMALTSTACK, OP_TOALTSTACK, OP_2DROP, OP_2DUP, OP_3DUP,
		OP_2OVER, OP_2ROT, OP_2SWAP, OP_IFDUP, OP_DROP, OP_DUP, OP_NIP,
		OP_OVER, OP_PICK, OP_ROLL, OP_ROT, OP_SWAP, OP_TUCK,
	}
	for _, op := range stackops {
		cases = append(cases, testStruct{
			op:      op,
			startVM: &virtualMachine{runLimit: 0},
			wantErr: ErrRunLimitExceeded,
		})
	}

	for i, c := range cases {
		err := ops[c.op].fn(c.startVM)

		if err != c.wantErr {
			t.Errorf("case %d, op %s: got err = %v want %v", i, ops[c.op].name, err, c.wantErr)
			continue
		}
		if c.wantErr != nil {
			continue
		}

		if !testutil.DeepEqual(c.startVM, c.wantVM) {
			t.Errorf("case %d, op %s: unexpected vm result\n\tgot:  %+v\n\twant: %+v\n", i, ops[c.op].name, c.startVM, c.wantVM)
		}
	}
}
+
// TestStackUnderflow checks that each listed opcode returns
// ErrDataStackUnderflow when invoked with fewer data-stack items than
// it requires, for every deficient depth from 0 up to narg-1.
func TestStackUnderflow(t *testing.T) {
	cases := []struct {
		narg int // number of stack items required
		op   func(*virtualMachine) error
	}{
		// bitwise
		{1, opInvert},
		{2, opAnd},
		{2, opOr},
		{2, opXor},
		{2, opEqual},
		{2, opEqualVerify},

		// control
		{1, opVerify},
		{3, opCheckPredicate},
		{1, opJumpIf},

		// crypto
		{1, opSha256},
		{1, opSha3},
		{3, opCheckSig},
		{3, opCheckMultiSig}, // special, see also TestCryptoOps

		// introspection
		{6, opCheckOutput},

		// numeric
		{1, op1Add},
		{1, op1Sub},
		{1, op2Mul},
		{1, op2Div},
		{1, opNegate},
		{1, opAbs},
		{1, opNot},
		{1, op0NotEqual},
		{2, opAdd},
		{2, opSub},
		{2, opMul},
		{2, opDiv},
		{2, opMod},
		{2, opLshift},
		{2, opRshift},
		{2, opBoolAnd},
		{2, opBoolOr},
		{2, opNumEqual},
		{2, opNumEqualVerify},
		{2, opNumNotEqual},
		{2, opLessThan},
		{2, opGreaterThan},
		{2, opLessThanOrEqual},
		{2, opGreaterThanOrEqual},
		{2, opMin},
		{2, opMax},
		{3, opWithin},

		// splice
		{2, opCat},
		{3, opSubstr},
		{2, opLeft},
		{2, opRight},
		{1, opSize},
		{2, opCatpushdata},

		// stack
		{1, opToAltStack},
		{2, op2Drop},
		{2, op2Dup},
		{3, op3Dup},
		{4, op2Over},
		{6, op2Rot},
		{4, op2Swap},
		{1, opIfDup},
		{1, opDrop},
		{1, opDup},
		{2, opNip},
		{2, opOver},
		{2, opPick}, // TODO(kr): special; check data-dependent # of pops
		{2, opRoll}, // TODO(kr): special; check data-dependent # of pops
		{3, opRot},
		{2, opSwap},
		{2, opTuck},
	}

	for _, test := range cases {
		t.Run(funcName(test.op), func(t *testing.T) {

			// try every insufficient stack depth below the requirement
			for i := 0; i < test.narg; i++ {
				t.Run(fmt.Sprintf("%d args", i), func(t *testing.T) {

					vm := &virtualMachine{
						runLimit:  50000,
						dataStack: make([][]byte, i),
					}
					err := test.op(vm)
					if err != ErrDataStackUnderflow {
						t.Errorf("err = %v, want ErrStackUnderflow", err)
					}

				})
			}

		})
	}
}
+
// funcName returns the bare name of f (the final dot-separated
// component of its runtime symbol), or "" if f is not a function.
func funcName(f interface{}) string {
	rv := reflect.ValueOf(f)
	if rv.Kind() != reflect.Func {
		return ""
	}
	full := runtime.FuncForPC(rv.Pointer()).Name()
	if i := strings.LastIndex(full, "."); i >= 0 {
		return full[i+1:]
	}
	return full
}
diff --git a/protocol/vm/types.go b/protocol/vm/types.go
new file mode 100644 (file)
index 0000000..b92381d
--- /dev/null
@@ -0,0 +1,52 @@
+package vm
+
+import "encoding/binary"
+
// trueBytes is the canonical VM byte-string encoding of true.
var trueBytes = []byte{1}

// BoolBytes converts a boolean into its VM encoding: {1} for true,
// and an empty (but non-nil) byte string for false.
func BoolBytes(b bool) (result []byte) {
	if !b {
		return []byte{}
	}
	return trueBytes
}
+
// AsBool interprets a VM byte string as a boolean: it is true iff
// at least one byte is nonzero, so both "" and all-zero strings are
// false.
func AsBool(bytes []byte) bool {
	for i := 0; i < len(bytes); i++ {
		if bytes[i] != 0 {
			return true
		}
	}
	return false
}
+
// Int64Bytes encodes n as a little-endian byte string with all
// trailing zero bytes removed, so zero encodes as the empty string
// and -1 occupies the full eight bytes.
func Int64Bytes(n int64) []byte {
	if n == 0 {
		return []byte{}
	}
	buf := make([]byte, 8)
	// int64 -> uint64 reinterprets the bits and loses nothing.
	binary.LittleEndian.PutUint64(buf, uint64(n))
	end := len(buf)
	for end > 0 && buf[end-1] == 0 {
		end--
	}
	return buf[:end]
}
+
+func AsInt64(b []byte) (int64, error) {
+       if len(b) == 0 {
+               return 0, nil
+       }
+       if len(b) > 8 {
+               return 0, ErrBadValue
+       }
+
+       var padded [8]byte
+       copy(padded[:], b)
+
+       res := binary.LittleEndian.Uint64(padded[:])
+       // converting uint64 to int64 is a safe operation that
+       // preserves all data
+       return int64(res), nil
+}
diff --git a/protocol/vm/types_test.go b/protocol/vm/types_test.go
new file mode 100644 (file)
index 0000000..70031bc
--- /dev/null
@@ -0,0 +1,80 @@
+package vm
+
+import (
+       "bytes"
+       "testing"
+)
+
+func TestBoolBytes(t *testing.T) {
+       got := BoolBytes(true)
+       want := []byte{1}
+       if !bytes.Equal(got, want) {
+               t.Errorf("BoolBytes(t) = %x want %x", got, want)
+       }
+
+       got = BoolBytes(false)
+       want = []byte{}
+       if !bytes.Equal(got, want) {
+               t.Errorf("BoolBytes(f) = %x want %x", got, want)
+       }
+}
+
+func TestAsBool(t *testing.T) {
+       cases := []struct {
+               data []byte
+               want bool
+       }{
+               {[]byte{0, 0, 0, 0}, false},
+               {[]byte{0}, false},
+               {[]byte{}, false},
+               {[]byte{1}, true},
+               {[]byte{1, 1, 1, 1}, true},
+               {[]byte{0, 0, 0, 1}, true},
+               {[]byte{1, 0, 0, 0}, true},
+               {[]byte{2}, true},
+       }
+
+       for _, c := range cases {
+               got := AsBool(c.data)
+
+               if got != c.want {
+                       t.Errorf("AsBool(%x) = %v want %v", c.data, got, c.want)
+               }
+       }
+}
+
+func TestInt64(t *testing.T) {
+       cases := []struct {
+               num  int64
+               data []byte
+       }{
+               {0, []byte{}},
+               {1, []byte{0x01}},
+               {255, []byte{0xff}},
+               {256, []byte{0x00, 0x01}},
+               {1 << 16, []byte{0x00, 0x00, 0x01}},
+               {-1, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+               {-2, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+       }
+
+       for _, c := range cases {
+               gotData := Int64Bytes(c.num)
+
+               if !bytes.Equal(gotData, c.data) {
+                       t.Errorf("Int64Bytes(%d) = %x want %x", c.num, gotData, c.data)
+               }
+
+               gotNum, _ := AsInt64(c.data)
+
+               if gotNum != c.num {
+                       t.Errorf("AsInt64(%x) = %d want %d", c.data, gotNum, c.num)
+               }
+       }
+
+       data := []byte{1, 1, 1, 1, 1, 1, 1, 1, 1}
+       _, err := AsInt64(data)
+       want := ErrBadValue
+       if err != want {
+               t.Errorf("AsInt64(%x) = %v want %v", data, err, want)
+       }
+}
diff --git a/protocol/vm/vm.go b/protocol/vm/vm.go
new file mode 100644 (file)
index 0000000..dc2d523
--- /dev/null
@@ -0,0 +1,248 @@
+package vm
+
+import (
+       "encoding/hex"
+       "fmt"
+       "io"
+       "strings"
+
+       "chain/errors"
+)
+
// initialRunLimit is the execution budget every top-level program
// starts with; opcodes and stack traffic deduct from it.
const initialRunLimit = 10000

// virtualMachine holds the complete execution state of one VM run.
type virtualMachine struct {
	context *Context

	program      []byte // the program currently executing
	pc, nextPC   uint32 // current and next instruction offsets within program
	runLimit     int64  // remaining execution budget; exhaustion aborts the run
	deferredCost int64  // cost accumulated by the current op, applied after it returns

	// expansionReserved, when true, makes expansion opcodes fail
	// with ErrDisallowedOpcode instead of acting as cheap no-ops.
	expansionReserved bool

	// Stores the data parsed out of an opcode. Used as input to
	// data-pushing opcodes.
	data []byte

	// CHECKPREDICATE spawns a child vm with depth+1
	depth int

	// In each of these stacks, stack[len(stack)-1] is the top element.
	dataStack [][]byte
	altStack  [][]byte
}
+
+// ErrFalseVMResult is one of the ways for a transaction to fail validation
+var ErrFalseVMResult = errors.New("false VM result")
+
+// TraceOut - if non-nil - will receive trace output during
+// execution.
+var TraceOut io.Writer
+
// Verify runs the program in context on a fresh VM and returns nil
// only if it runs to completion leaving a true value on top of the
// data stack. It returns ErrUnsupportedVM for VM versions other than
// 1 and ErrFalseVMResult for a clean run with a false result; other
// failures are wrapped (see wrapErr) with the program and arguments
// for diagnostics.
func Verify(context *Context) (err error) {
	// Convert interpreter panics into ErrUnexpected error returns
	// instead of crashing the caller.
	defer func() {
		if r := recover(); r != nil {
			if rErr, ok := r.(error); ok {
				err = errors.Sub(ErrUnexpected, rErr)
			} else {
				err = errors.Wrap(ErrUnexpected, r)
			}
		}
	}()

	if context.VMVersion != 1 {
		return ErrUnsupportedVM
	}

	vm := &virtualMachine{
		// Expansion opcodes are reserved (disallowed) for tx version 1.
		expansionReserved: context.TxVersion != nil && *context.TxVersion == 1,
		program:           context.Code,
		runLimit:          initialRunLimit,
		context:           context,
	}

	// Seed the data stack with the supplied arguments, charging
	// their cost against the run limit.
	args := context.Arguments
	for i, arg := range args {
		err = vm.push(arg, false)
		if err != nil {
			return errors.Wrapf(err, "pushing initial argument %d", i)
		}
	}

	err = vm.run()
	if err == nil && vm.falseResult() {
		err = ErrFalseVMResult
	}

	return wrapErr(err, vm, args)
}
+
// falseResult returns true iff the stack is empty or the top
// item is false
func (vm *virtualMachine) falseResult() bool {
	return len(vm.dataStack) == 0 || !AsBool(vm.dataStack[len(vm.dataStack)-1])
}

// run executes the program from offset 0 until it runs off the end
// of the program or an instruction fails; step is responsible for
// advancing (or jumping) vm.pc.
func (vm *virtualMachine) run() error {
	for vm.pc = 0; vm.pc < uint32(len(vm.program)); { // handle vm.pc updates in step
		err := vm.step()
		if err != nil {
			return err
		}
	}
	return nil
}
+
// step decodes and executes the single instruction at vm.pc,
// charging its cost against the run limit and advancing vm.pc
// (jump opcodes advance it non-sequentially by rewriting nextPC).
func (vm *virtualMachine) step() error {
	inst, err := ParseOp(vm.program, vm.pc)
	if err != nil {
		return err
	}

	// Default successor; jump opcodes overwrite nextPC in their fn.
	vm.nextPC = vm.pc + inst.Len

	if TraceOut != nil {
		opname := inst.Op.String()
		fmt.Fprintf(TraceOut, "vm %d pc %d limit %d %s", vm.depth, vm.pc, vm.runLimit, opname)
		if len(inst.Data) > 0 {
			fmt.Fprintf(TraceOut, " %x", inst.Data)
		}
		fmt.Fprint(TraceOut, "\n")
	}

	if isExpansion[inst.Op] {
		// Expansion opcodes are errors when reserved; otherwise they
		// act as cost-1 no-ops for forward compatibility.
		if vm.expansionReserved {
			return ErrDisallowedOpcode
		}
		vm.pc = vm.nextPC
		return vm.applyCost(1)
	}

	vm.deferredCost = 0
	vm.data = inst.Data
	err = ops[inst.Op].fn(vm)
	if err != nil {
		return err
	}
	// Apply whatever net cost the opcode deferred during execution.
	err = vm.applyCost(vm.deferredCost)
	if err != nil {
		return err
	}
	vm.pc = vm.nextPC

	if TraceOut != nil {
		for i := len(vm.dataStack) - 1; i >= 0; i-- {
			fmt.Fprintf(TraceOut, "  stack %d: %x\n", len(vm.dataStack)-1-i, vm.dataStack[i])
		}
	}

	return nil
}
+
// push appends data to the data stack, charging 8 plus the data
// length against the run limit. With deferred true the charge is
// accumulated in deferredCost instead of being applied immediately.
func (vm *virtualMachine) push(data []byte, deferred bool) error {
	cost := 8 + int64(len(data))
	if deferred {
		vm.deferCost(cost)
	} else {
		err := vm.applyCost(cost)
		if err != nil {
			return err
		}
	}
	vm.dataStack = append(vm.dataStack, data)
	return nil
}

// pushBool pushes the VM encoding of b (see BoolBytes).
func (vm *virtualMachine) pushBool(b bool, deferred bool) error {
	return vm.push(BoolBytes(b), deferred)
}

// pushInt64 pushes the VM encoding of n (see Int64Bytes).
func (vm *virtualMachine) pushInt64(n int64, deferred bool) error {
	return vm.push(Int64Bytes(n), deferred)
}

// pop removes and returns the top of the data stack, refunding 8
// plus the item length to the run limit (or to deferredCost when
// deferred is true). It fails with ErrDataStackUnderflow on an
// empty stack.
func (vm *virtualMachine) pop(deferred bool) ([]byte, error) {
	if len(vm.dataStack) == 0 {
		return nil, ErrDataStackUnderflow
	}
	res := vm.dataStack[len(vm.dataStack)-1]
	vm.dataStack = vm.dataStack[:len(vm.dataStack)-1]

	cost := 8 + int64(len(res))
	if deferred {
		vm.deferCost(-cost)
	} else {
		vm.runLimit += cost
	}

	return res, nil
}
+
+func (vm *virtualMachine) popInt64(deferred bool) (int64, error) {
+       bytes, err := vm.pop(deferred)
+       if err != nil {
+               return 0, err
+       }
+       n, err := AsInt64(bytes)
+       return n, err
+}
+
// top returns the top of the data stack without removing it, or
// ErrDataStackUnderflow if the stack is empty.
func (vm *virtualMachine) top() ([]byte, error) {
	if len(vm.dataStack) == 0 {
		return nil, ErrDataStackUnderflow
	}
	return vm.dataStack[len(vm.dataStack)-1], nil
}
+
// applyCost charges n against the remaining run limit, failing with
// ErrRunLimitExceeded when the budget is insufficient.
// positive cost decreases runlimit, negative cost increases it
func (vm *virtualMachine) applyCost(n int64) error {
	if n > vm.runLimit {
		return ErrRunLimitExceeded
	}
	vm.runLimit -= n
	return nil
}
+
// deferCost accumulates cost to be applied after the current opcode
// finishes (see step); n may be negative, acting as a refund.
func (vm *virtualMachine) deferCost(n int64) {
	vm.deferredCost += n
}
+
// stackCost returns the run-limit cost of holding stack: a flat 8
// units per item plus one unit per byte of item data.
func stackCost(stack [][]byte) int64 {
	var cost int64
	for _, item := range stack {
		cost += 8 + int64(len(item))
	}
	return cost
}
+
// Error wraps a VM execution failure together with the program that
// was running and its initial arguments, for diagnostic display.
type Error struct {
	Err  error    // underlying cause
	Prog []byte   // the program that was executing
	Args [][]byte // the arguments initially pushed onto the stack
}

// Error implements the error interface, rendering the underlying
// error plus the program (hex and disassembled) and hex arguments.
func (e Error) Error() string {
	dis, err := Disassemble(e.Prog)
	if err != nil {
		// Fall back to a placeholder when the program can't be disassembled.
		dis = "???"
	}

	args := make([]string, 0, len(e.Args))
	for _, a := range e.Args {
		args = append(args, hex.EncodeToString(a))
	}

	return fmt.Sprintf("%s [prog %x = %s; args %s]", e.Err.Error(), e.Prog, dis, strings.Join(args, " "))
}

// wrapErr decorates a non-nil err with the VM's program and initial
// arguments; nil passes through unchanged.
func wrapErr(err error, vm *virtualMachine, args [][]byte) error {
	if err == nil {
		return nil
	}
	return Error{
		Err:  err,
		Prog: vm.program,
		Args: args,
	}
}
diff --git a/protocol/vm/vm_test.go b/protocol/vm/vm_test.go
new file mode 100644 (file)
index 0000000..afe2cb5
--- /dev/null
@@ -0,0 +1,465 @@
+package vm
+
+import (
+       "bytes"
+       "fmt"
+       "os"
+       "strings"
+       "testing"
+       "testing/quick"
+
+       "chain/errors"
+       "chain/testutil"
+)
+
// tracebuf collects VM trace output so it can be dumped to stdout
// only when a test fails.
type tracebuf struct {
	bytes.Buffer
}

// dump writes the collected trace to stdout.
// Pointer receiver: the original value receiver copied the embedded
// bytes.Buffer on every call, which is wasteful and a known hazard
// for buffer types; all callers already hold *tracebuf.
func (t *tracebuf) dump() {
	os.Stdout.Write(t.Bytes())
}
+
// Programs that run without error.
func TestProgramOK(t *testing.T) {
	doOKNotOK(t, true)
}

// Programs that return an ErrFalseVMResult.
// Uses the same case table as TestProgramOK; doOKNotOK appends
// " NOT" to each program to invert the expected result.
func TestProgramNotOK(t *testing.T) {
	doOKNotOK(t, false)
}
+
// doOKNotOK assembles and runs each case's program with its
// arguments pre-loaded on the data stack. With expectOK true the
// program must succeed; with expectOK false " NOT" is appended to
// the source and the run must end in ErrFalseVMResult.
func doOKNotOK(t *testing.T, expectOK bool) {
	cases := []struct {
		prog string
		args [][]byte
	}{
		{"TRUE", nil},

		// bitwise ops
		{"INVERT 0xfef0 EQUAL", [][]byte{{0x01, 0x0f}}},

		{"AND 0x02 EQUAL", [][]byte{{0x03}, {0x06}}},
		{"AND 0x02 EQUAL", [][]byte{{0x03, 0xff}, {0x06}}},

		{"OR 0x07 EQUAL", [][]byte{{0x03}, {0x06}}},
		{"OR 0x07ff EQUAL", [][]byte{{0x03, 0xff}, {0x06}}},

		{"XOR 0x05 EQUAL", [][]byte{{0x03}, {0x06}}},
		{"XOR 0x05ff EQUAL", [][]byte{{0x03, 0xff}, {0x06}}},

		// numeric and logical ops
		{"1ADD 2 NUMEQUAL", [][]byte{Int64Bytes(1)}},
		{"1ADD 0 NUMEQUAL", [][]byte{Int64Bytes(-1)}},

		{"1SUB 1 NUMEQUAL", [][]byte{Int64Bytes(2)}},
		{"1SUB -1 NUMEQUAL", [][]byte{Int64Bytes(0)}},

		{"2MUL 2 NUMEQUAL", [][]byte{Int64Bytes(1)}},
		{"2MUL 0 NUMEQUAL", [][]byte{Int64Bytes(0)}},
		{"2MUL -2 NUMEQUAL", [][]byte{Int64Bytes(-1)}},

		{"2DIV 1 NUMEQUAL", [][]byte{Int64Bytes(2)}},
		{"2DIV 0 NUMEQUAL", [][]byte{Int64Bytes(1)}},
		{"2DIV 0 NUMEQUAL", [][]byte{Int64Bytes(0)}},
		{"2DIV -1 NUMEQUAL", [][]byte{Int64Bytes(-1)}},
		{"2DIV -1 NUMEQUAL", [][]byte{Int64Bytes(-2)}},

		{"NEGATE -1 NUMEQUAL", [][]byte{Int64Bytes(1)}},
		{"NEGATE 1 NUMEQUAL", [][]byte{Int64Bytes(-1)}},
		{"NEGATE 0 NUMEQUAL", [][]byte{Int64Bytes(0)}},

		{"ABS 1 NUMEQUAL", [][]byte{Int64Bytes(1)}},
		{"ABS 1 NUMEQUAL", [][]byte{Int64Bytes(-1)}},
		{"ABS 0 NUMEQUAL", [][]byte{Int64Bytes(0)}},

		{"0NOTEQUAL", [][]byte{Int64Bytes(1)}},
		{"0NOTEQUAL NOT", [][]byte{Int64Bytes(0)}},

		{"ADD 5 NUMEQUAL", [][]byte{Int64Bytes(2), Int64Bytes(3)}},

		{"SUB 2 NUMEQUAL", [][]byte{Int64Bytes(5), Int64Bytes(3)}},

		{"MUL 6 NUMEQUAL", [][]byte{Int64Bytes(2), Int64Bytes(3)}},

		{"DIV 2 NUMEQUAL", [][]byte{Int64Bytes(6), Int64Bytes(3)}},

		{"MOD 0 NUMEQUAL", [][]byte{Int64Bytes(6), Int64Bytes(2)}},
		{"MOD 0 NUMEQUAL", [][]byte{Int64Bytes(-6), Int64Bytes(2)}},
		{"MOD 0 NUMEQUAL", [][]byte{Int64Bytes(6), Int64Bytes(-2)}},
		{"MOD 0 NUMEQUAL", [][]byte{Int64Bytes(-6), Int64Bytes(-2)}},
		{"MOD 2 NUMEQUAL", [][]byte{Int64Bytes(12), Int64Bytes(10)}},
		{"MOD 8 NUMEQUAL", [][]byte{Int64Bytes(-12), Int64Bytes(10)}},
		{"MOD -8 NUMEQUAL", [][]byte{Int64Bytes(12), Int64Bytes(-10)}},
		{"MOD -2 NUMEQUAL", [][]byte{Int64Bytes(-12), Int64Bytes(-10)}},

		{"LSHIFT 2 NUMEQUAL", [][]byte{Int64Bytes(1), Int64Bytes(1)}},
		{"LSHIFT 4 NUMEQUAL", [][]byte{Int64Bytes(1), Int64Bytes(2)}},
		{"LSHIFT -2 NUMEQUAL", [][]byte{Int64Bytes(-1), Int64Bytes(1)}},
		{"LSHIFT -4 NUMEQUAL", [][]byte{Int64Bytes(-1), Int64Bytes(2)}},

		{"1 1 BOOLAND", nil},
		{"1 0 BOOLAND NOT", nil},
		{"0 1 BOOLAND NOT", nil},
		{"0 0 BOOLAND NOT", nil},

		{"1 1 BOOLOR", nil},
		{"1 0 BOOLOR", nil},
		{"0 1 BOOLOR", nil},
		{"0 0 BOOLOR NOT", nil},

		{"1 2 OR 3 EQUAL", nil},

		// splice ops
		{"0 CATPUSHDATA 0x0000 EQUAL", [][]byte{{0x00}}},
		{"0 0xff CATPUSHDATA 0x01ff EQUAL", nil},
		{"CATPUSHDATA 0x050105 EQUAL", [][]byte{{0x05}, {0x05}}},
		{"CATPUSHDATA 0xff01ff EQUAL", [][]byte{{0xff}, {0xff}}},
		{"0 0xcccccc CATPUSHDATA 0x03cccccc EQUAL", nil},
		{"0x05 0x05 SWAP 0xdeadbeef CATPUSHDATA DROP 0x05 EQUAL", nil},
		{"0x05 0x05 SWAP 0xdeadbeef CATPUSHDATA DROP 0x05 EQUAL", nil},

		// // control flow ops
		{"1 JUMP:7 0 1 EQUAL", nil},                                                       // jumps over 0
		{"1 JUMP:$target 0 $target 1 EQUAL", nil},                                         // jumps over 0
		{"1 1 JUMPIF:8 0 1 EQUAL", nil},                                                   // jumps over 0
		{"1 1 JUMPIF:$target 0 $target 1 EQUAL", nil},                                     // jumps over 0
		{"1 0 JUMPIF:8 0 1 EQUAL NOT", nil},                                               // doesn't jump over 0
		{"1 0 JUMPIF:$target 0 $target 1 EQUAL NOT", nil},                                 // doesn't jump over 0
		{"1 0 JUMPIF:1", nil},                                                             // doesn't jump, so no infinite loop
		{"1 $target 0 JUMPIF:$target", nil},                                               // doesn't jump, so no infinite loop
		{"4 1 JUMPIF:14 5 EQUAL JUMP:16 4 EQUAL", nil},                                    // if (true) { return x == 4; } else { return x == 5; }
		{"4 1 JUMPIF:$true 5 EQUAL JUMP:$end $true 4 EQUAL $end", nil},                    // if (true) { return x == 4; } else { return x == 5; }
		{"5 0 JUMPIF:14 5 EQUAL JUMP:16 4 EQUAL", nil},                                    // if (false) { return x == 4; } else { return x == 5; }
		{"5 0 JUMPIF:$true 5 EQUAL JUMP:$end $true 4 $test EQUAL $end", nil},              // if (false) { return x == 4; } else { return x == 5; }
		{"0 1 2 3 4 5 6 JUMP:13 DROP DUP 0 NUMNOTEQUAL JUMPIF:12 1", nil},                 // same as "0 1 2 3 4 5 6 WHILE DROP ENDWHILE 1"
		{"0 1 2 3 4 5 6 JUMP:$dup $drop DROP $dup DUP 0 NUMNOTEQUAL JUMPIF:$drop 1", nil}, // same as "0 1 2 3 4 5 6 WHILE DROP ENDWHILE 1"
		{"0 JUMP:7 1ADD DUP 10 LESSTHAN JUMPIF:6 10 NUMEQUAL", nil},                       // fixed version of "0 1 WHILE DROP 1ADD DUP 10 LESSTHAN ENDWHILE 10 NUMEQUAL"
		{"0 JUMP:$dup $add 1ADD $dup DUP 10 LESSTHAN JUMPIF:$add 10 NUMEQUAL", nil},       // fixed version of "0 1 WHILE DROP 1ADD DUP 10 LESSTHAN ENDWHILE 10 NUMEQUAL"

	}
	for i, c := range cases {
		progSrc := c.prog
		if !expectOK {
			progSrc += " NOT"
		}
		prog, err := Assemble(progSrc)
		if err != nil {
			t.Fatal(err)
		}
		fmt.Printf("* case %d, prog [%s] [%x]\n", i, progSrc, prog)
		// Collect trace output so it can be dumped only on failure.
		trace := new(tracebuf)
		TraceOut = trace
		vm := &virtualMachine{
			program:   prog,
			runLimit:  int64(initialRunLimit),
			dataStack: append([][]byte{}, c.args...),
		}
		err = vm.run()
		if err == nil && vm.falseResult() {
			err = ErrFalseVMResult
		}
		if expectOK && err != nil {
			trace.dump()
			t.Errorf("case %d [%s]: expected success, got error %s", i, progSrc, err)
		} else if !expectOK && err != ErrFalseVMResult {
			trace.dump()
			t.Errorf("case %d [%s]: expected ErrFalseVMResult, got %s", i, progSrc, err)
		}
	}
}
+
// TestVerifyTxInput checks Verify against transaction-input style
// contexts: a passing program, an unsupported VM version, and an
// argument large enough to exhaust the run limit.
func TestVerifyTxInput(t *testing.T) {
	cases := []struct {
		vctx    *Context
		wantErr error
	}{
		{
			vctx: &Context{
				VMVersion: 1,
				Code:      []byte{byte(OP_ADD), byte(OP_5), byte(OP_NUMEQUAL)},
				Arguments: [][]byte{{2}, {3}},
			},
		},
		{
			vctx:    &Context{VMVersion: 2},
			wantErr: ErrUnsupportedVM,
		},
		{
			vctx: &Context{
				VMVersion: 1,
				Code:      []byte{byte(OP_ADD), byte(OP_5), byte(OP_NUMEQUAL)},
				Arguments: [][]byte{make([]byte, 50001)},
			},
			wantErr: ErrRunLimitExceeded,
		},
	}

	for _, c := range cases {
		gotErr := Verify(c.vctx)
		// Compare root causes since Verify wraps errors (see wrapErr).
		if errors.Root(gotErr) != c.wantErr {
			t.Errorf("VerifyTxInput(%+v) err = %v want %v", c.vctx, gotErr, c.wantErr)
		}
	}
}
+
// TestVerifyBlockHeader checks Verify against block-header style
// contexts: a passing consensus program, and arguments large enough
// to exceed the run limit.
func TestVerifyBlockHeader(t *testing.T) {
	consensusProg := []byte{byte(OP_ADD), byte(OP_5), byte(OP_NUMEQUAL)}
	context := &Context{
		VMVersion: 1,
		Code:      consensusProg,
		Arguments: [][]byte{{2}, {3}},
	}
	gotErr := Verify(context)
	if gotErr != nil {
		t.Errorf("unexpected error: %v", gotErr)
	}

	// A 50000-byte argument costs more than the initial run limit
	// to push, so verification must fail before the program runs.
	context = &Context{
		VMVersion: 1,
		Arguments: [][]byte{make([]byte, 50000)},
	}
	gotErr = Verify(context)
	if errors.Root(gotErr) != ErrRunLimitExceeded {
		t.Error("expected block to exceed run limit")
	}
}
+
+func TestRun(t *testing.T) {
+       cases := []struct {
+               vm      *virtualMachine
+               wantErr error
+       }{{
+               vm: &virtualMachine{runLimit: 50000, program: []byte{byte(OP_TRUE)}},
+       }, {
+               vm:      &virtualMachine{runLimit: 50000, program: []byte{byte(OP_ADD)}},
+               wantErr: ErrDataStackUnderflow,
+       }}
+
+       for i, c := range cases {
+               gotErr := c.vm.run()
+
+               if gotErr != c.wantErr {
+                       t.Errorf("run test %d: got err = %v want %v", i, gotErr, c.wantErr)
+                       continue
+               }
+
+               if c.wantErr != nil {
+                       continue
+               }
+       }
+}
+
// TestStep drives virtualMachine.step one instruction at a time and
// compares the entire resulting VM state (or the returned error)
// against an expected snapshot. Cases cover a plain push, forward
// and backward jumps (taken and not taken), expansion opcodes in
// both reserved and permissive modes, stack underflow, and run-limit
// exhaustion.
func TestStep(t *testing.T) {
	txVMContext := &Context{DestPos: new(uint64)}
	cases := []struct {
		startVM *virtualMachine
		wantVM  *virtualMachine
		wantErr error
	}{{
		startVM: &virtualMachine{
			program:  []byte{byte(OP_TRUE)},
			runLimit: 50000,
		},
		wantVM: &virtualMachine{
			program:   []byte{byte(OP_TRUE)},
			runLimit:  49990,
			dataStack: [][]byte{{1}},
			pc:        1,
			nextPC:    1,
			data:      []byte{1},
		},
	}, {
		startVM: &virtualMachine{
			program:   []byte{byte(OP_TRUE), byte(OP_JUMP), byte(0xff), byte(0x00), byte(0x00), byte(0x00)},
			runLimit:  49990,
			dataStack: [][]byte{},
			pc:        1,
		},
		wantVM: &virtualMachine{
			program:      []byte{byte(OP_TRUE), byte(OP_JUMP), byte(0xff), byte(0x00), byte(0x00), byte(0x00)},
			runLimit:     49989,
			dataStack:    [][]byte{},
			data:         []byte{byte(0xff), byte(0x00), byte(0x00), byte(0x00)},
			pc:           255,
			nextPC:       255,
			deferredCost: 0,
		},
	}, {
		startVM: &virtualMachine{
			program:   []byte{byte(OP_TRUE), byte(OP_JUMPIF), byte(0x00), byte(0x00), byte(0x00), byte(0x00)},
			runLimit:  49995,
			dataStack: [][]byte{{1}},
			pc:        1,
		},
		wantVM: &virtualMachine{
			program:      []byte{byte(OP_TRUE), byte(OP_JUMPIF), byte(0x00), byte(0x00), byte(0x00), byte(0x00)},
			runLimit:     50003,
			dataStack:    [][]byte{},
			pc:           0,
			nextPC:       0,
			data:         []byte{byte(0x00), byte(0x00), byte(0x00), byte(0x00)},
			deferredCost: -9,
		},
	}, {
		startVM: &virtualMachine{
			program:   []byte{byte(OP_FALSE), byte(OP_JUMPIF), byte(0x00), byte(0x00), byte(0x00), byte(0x00)},
			runLimit:  49995,
			dataStack: [][]byte{{}},
			pc:        1,
		},
		wantVM: &virtualMachine{
			program:      []byte{byte(OP_FALSE), byte(OP_JUMPIF), byte(0x00), byte(0x00), byte(0x00), byte(0x00)},
			runLimit:     50002,
			dataStack:    [][]byte{},
			pc:           6,
			nextPC:       6,
			data:         []byte{byte(0x00), byte(0x00), byte(0x00), byte(0x00)},
			deferredCost: -8,
		},
	}, {
		startVM: &virtualMachine{
			program:   []byte{255},
			runLimit:  50000,
			dataStack: [][]byte{},
		},
		wantVM: &virtualMachine{
			program:   []byte{255},
			runLimit:  49999,
			pc:        1,
			nextPC:    1,
			dataStack: [][]byte{},
		},
	}, {
		startVM: &virtualMachine{
			program:  []byte{byte(OP_ADD)},
			runLimit: 50000,
		},
		wantErr: ErrDataStackUnderflow,
	}, {
		startVM: &virtualMachine{
			program:  []byte{byte(OP_INDEX)},
			runLimit: 1,
			context:  txVMContext,
		},
		wantErr: ErrRunLimitExceeded,
	}, {
		startVM: &virtualMachine{
			program:           []byte{255},
			runLimit:          100,
			expansionReserved: true,
		},
		wantErr: ErrDisallowedOpcode,
	}, {
		startVM: &virtualMachine{
			program:  []byte{255},
			runLimit: 100,
		},
		wantVM: &virtualMachine{
			program:  []byte{255},
			runLimit: 99,
			pc:       1,
			nextPC:   1,
		},
	}}

	for i, c := range cases {
		gotErr := c.startVM.step()
		gotVM := c.startVM

		if gotErr != c.wantErr {
			t.Errorf("step test %d: got err = %v want %v", i, gotErr, c.wantErr)
			continue
		}

		if c.wantErr != nil {
			continue
		}

		if !testutil.DeepEqual(gotVM, c.wantVM) {
			t.Errorf("step test %d:\n\tgot vm:  %+v\n\twant vm: %+v", i, gotVM, c.wantVM)
		}
	}
}
+
+func decompile(prog []byte) string {
+       var strs []string
+       for i := uint32(0); i < uint32(len(prog)); { // update i inside the loop
+               inst, err := ParseOp(prog, i)
+               if err != nil {
+                       strs = append(strs, fmt.Sprintf("<%x>", prog[i]))
+                       i++
+                       continue
+               }
+               var str string
+               if len(inst.Data) > 0 {
+                       str = fmt.Sprintf("0x%x", inst.Data)
+               } else {
+                       str = inst.Op.String()
+               }
+               strs = append(strs, str)
+               i += inst.Len
+       }
+       return strings.Join(strs, " ")
+}
+
// TestVerifyTxInputQuickCheck feeds quick-generated programs and
// witnesses to Verify, asserting only that it never panics.
func TestVerifyTxInputQuickCheck(t *testing.T) {
	f := func(program []byte, witnesses [][]byte) (ok bool) {
		// Turn a panic into a test failure with enough logged
		// context to reproduce it.
		defer func() {
			if err := recover(); err != nil {
				t.Log(decompile(program))
				for i := range witnesses {
					t.Logf("witness %d: %x\n", i, witnesses[i])
				}
				t.Log(err)
				ok = false
			}
		}()

		vctx := &Context{
			VMVersion: 1,
			Code:      program,
			Arguments: witnesses,

			// Leaving this out reduces coverage.
			// TODO(kr): figure out why and convert that
			// to a normal unit test.
			MaxTimeMS: new(uint64),
		}
		// Verify's result is deliberately ignored: any error is
		// acceptable here, only a panic counts as failure.
		Verify(vctx)

		return true
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}
+
// TestVerifyBlockHeaderQuickCheck is the block-header analogue of
// TestVerifyTxInputQuickCheck: quick-generated programs and
// witnesses must never make Verify panic.
func TestVerifyBlockHeaderQuickCheck(t *testing.T) {
	f := func(program []byte, witnesses [][]byte) (ok bool) {
		// Turn a panic into a test failure with enough logged
		// context to reproduce it.
		defer func() {
			if err := recover(); err != nil {
				t.Log(decompile(program))
				for i := range witnesses {
					t.Logf("witness %d: %x\n", i, witnesses[i])
				}
				t.Log(err)
				ok = false
			}
		}()
		context := &Context{
			VMVersion:            1,
			Code:                 program,
			Arguments:            witnesses,
			BlockHash:            new([]byte),
			BlockTimeMS:          new(uint64),
			NextConsensusProgram: &[]byte{},
		}
		// Only panics are failures; Verify's error is ignored.
		Verify(context)
		return true
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}
diff --git a/protocol/vm/vmutil/builder.go b/protocol/vm/vmutil/builder.go
new file mode 100644 (file)
index 0000000..d3d8fe8
--- /dev/null
@@ -0,0 +1,114 @@
+package vmutil
+
+import (
+       "encoding/binary"
+
+       "chain/errors"
+       "chain/protocol/vm"
+)
+
// Builder assembles a VM program incrementally, with support for
// named jump targets that are resolved to absolute addresses at
// Build time.
type Builder struct {
	program     []byte // bytecode accumulated so far
	jumpCounter int    // last jump-target number handed out by NewJumpTarget

	// Maps a jump target number to its absolute address.
	jumpAddr map[int]uint32

	// Maps a jump target number to the list of places where its
	// absolute address must be filled in once known.
	jumpPlaceholders map[int][]int
}
+
+func NewBuilder() *Builder {
+       return &Builder{
+               jumpAddr:         make(map[int]uint32),
+               jumpPlaceholders: make(map[int][]int),
+       }
+}
+
// AddInt64 adds a pushdata instruction for an integer value.
// It returns b to allow chaining.
func (b *Builder) AddInt64(n int64) *Builder {
	b.program = append(b.program, vm.PushdataInt64(n)...)
	return b
}

// AddData adds a pushdata instruction for a given byte string.
// It returns b to allow chaining.
func (b *Builder) AddData(data []byte) *Builder {
	b.program = append(b.program, vm.PushdataBytes(data)...)
	return b
}

// AddRawBytes simply appends the given bytes to the program. (It does
// not introduce a pushdata opcode.) It returns b to allow chaining.
func (b *Builder) AddRawBytes(data []byte) *Builder {
	b.program = append(b.program, data...)
	return b
}

// AddOp adds the given opcode to the program.
// It returns b to allow chaining.
func (b *Builder) AddOp(op vm.Op) *Builder {
	b.program = append(b.program, byte(op))
	return b
}
+
// NewJumpTarget allocates a number that can be used as a jump target
// in AddJump and AddJumpIf. Call SetJumpTarget to associate the
// number with a program location.
// Target numbers start at 1 and are unique within one Builder.
func (b *Builder) NewJumpTarget() int {
	b.jumpCounter++
	return b.jumpCounter
}
+
// AddJump adds a JUMP opcode whose target is the given target
// number. The actual program location of the target does not need to
// be known yet, as long as SetJumpTarget is called before Build.
func (b *Builder) AddJump(target int) *Builder {
	return b.addJump(vm.OP_JUMP, target)
}

// AddJumpIf adds a JUMPIF opcode whose target is the given target
// number. The actual program location of the target does not need to
// be known yet, as long as SetJumpTarget is called before Build.
func (b *Builder) AddJumpIf(target int) *Builder {
	return b.addJump(vm.OP_JUMPIF, target)
}

// addJump emits op followed by a 4-byte placeholder address, and
// records the placeholder's offset so Build can patch in the
// resolved address.
func (b *Builder) addJump(op vm.Op, target int) *Builder {
	b.AddOp(op)
	b.jumpPlaceholders[target] = append(b.jumpPlaceholders[target], len(b.program))
	b.AddRawBytes([]byte{0, 0, 0, 0})
	return b
}
+
// SetJumpTarget associates the given jump-target number with the
// current position in the program - namely, the program's length,
// such that the first instruction executed by a jump using this
// target will be whatever instruction is added next. It is legal for
// SetJumpTarget to be called at the end of the program, causing jumps
// using that target to fall off the end. There must be a call to
// SetJumpTarget for every jump target used before any call to Build.
// It returns b to allow chaining.
func (b *Builder) SetJumpTarget(target int) *Builder {
	b.jumpAddr[target] = uint32(len(b.program))
	return b
}
+
// ErrUnresolvedJump is returned (wrapped) by Build when a jump
// target was used but never placed with SetJumpTarget.
var ErrUnresolvedJump = errors.New("unresolved jump target")

// Build produces the bytecode of the program. It first resolves any
// jumps in the program by filling in the addresses of their
// targets. This requires SetJumpTarget to be called prior to Build
// for each jump target used (in a call to AddJump or AddJumpIf). If
// any target's address hasn't been set in this way, this function
// produces ErrUnresolvedJump. There are no other error conditions.
func (b *Builder) Build() ([]byte, error) {
	for target, placeholders := range b.jumpPlaceholders {
		addr, ok := b.jumpAddr[target]
		if !ok {
			return nil, errors.Wrapf(ErrUnresolvedJump, "target %d", target)
		}
		for _, placeholder := range placeholders {
			// Overwrite the 4-byte little-endian placeholder written
			// by addJump with the resolved absolute address.
			binary.LittleEndian.PutUint32(b.program[placeholder:placeholder+4], addr)
		}
	}
	return b.program, nil
}
diff --git a/protocol/vm/vmutil/builder_test.go b/protocol/vm/vmutil/builder_test.go
new file mode 100644 (file)
index 0000000..f6be334
--- /dev/null
@@ -0,0 +1,122 @@
+package vmutil
+
+import (
+       "bytes"
+       "encoding/hex"
+       "testing"
+
+       "chain/protocol/vm"
+)
+
// TestAddJump exercises the Builder's jump machinery in every
// ordering of AddJump relative to SetJumpTarget: target defined
// before the jump, after the jump, multiple jumps sharing a target,
// and multiple independent targets. Each case builds a tiny program
// and compares the serialized bytecode against a hand-computed hex
// string (the 4 bytes after each jump opcode are the little-endian
// address patched in by Build).
func TestAddJump(t *testing.T) {
	cases := []struct {
		name    string // subtest name
		wantHex string // expected serialized program, hex-encoded
		fn      func(t *testing.T, b *Builder)
	}{
		{
			"single jump single target not yet defined",
			"630600000061",
			func(t *testing.T, b *Builder) {
				target := b.NewJumpTarget()
				b.AddJump(target)
				b.AddOp(vm.OP_NOP)
				b.SetJumpTarget(target)
			},
		},
		{
			"single jump single target already defined",
			"616300000000",
			func(t *testing.T, b *Builder) {
				target := b.NewJumpTarget()
				b.SetJumpTarget(target)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target)
			},
		},
		{
			"two jumps single target not yet defined",
			"630c00000061630c00000061",
			func(t *testing.T, b *Builder) {
				target := b.NewJumpTarget()
				b.AddJump(target)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target)
				b.AddOp(vm.OP_NOP)
				b.SetJumpTarget(target)
			},
		},
		{
			"two jumps single target already defined",
			"616300000000616300000000",
			func(t *testing.T, b *Builder) {
				target := b.NewJumpTarget()
				b.SetJumpTarget(target)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target)
			},
		},
		{
			"two jumps single target, one not yet defined, one already defined",
			"630600000061616306000000",
			func(t *testing.T, b *Builder) {
				target := b.NewJumpTarget()
				b.AddJump(target)
				b.AddOp(vm.OP_NOP)
				b.SetJumpTarget(target)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target)
			},
		},
		{
			"two jumps, two targets, not yet defined",
			"630c00000061630d0000006161",
			func(t *testing.T, b *Builder) {
				target1 := b.NewJumpTarget()
				b.AddJump(target1)
				b.AddOp(vm.OP_NOP)
				target2 := b.NewJumpTarget()
				b.AddJump(target2)
				b.AddOp(vm.OP_NOP)
				b.SetJumpTarget(target1)
				b.AddOp(vm.OP_NOP)
				b.SetJumpTarget(target2)
			},
		},
		{
			"two jumps, two targets, already defined",
			"6161616301000000616302000000",
			func(t *testing.T, b *Builder) {
				b.AddOp(vm.OP_NOP)
				target1 := b.NewJumpTarget()
				b.SetJumpTarget(target1)
				b.AddOp(vm.OP_NOP)
				target2 := b.NewJumpTarget()
				b.SetJumpTarget(target2)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target1)
				b.AddOp(vm.OP_NOP)
				b.AddJump(target2)
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			b := NewBuilder()
			c.fn(t, b)
			// Build resolves all jump placeholders; any unset target
			// would surface here as ErrUnresolvedJump.
			prog, err := b.Build()
			if err != nil {
				t.Fatal(err)
			}
			want, err := hex.DecodeString(c.wantHex)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(prog, want) {
				t.Errorf("got %x, want %x", prog, want)
			}
		})
	}
}
diff --git a/protocol/vm/vmutil/script.go b/protocol/vm/vmutil/script.go
new file mode 100644 (file)
index 0000000..8f9b46f
--- /dev/null
@@ -0,0 +1,152 @@
+package vmutil
+
+import (
+       "chain/crypto/ed25519"
+       "chain/errors"
+       "chain/protocol/vm"
+)
+
var (
	// ErrBadValue is returned (with detail) by checkMultiSigParams when
	// a quorum/pubkey-count pair is invalid: negative values, quorum
	// larger than the key count, or an empty quorum with keys present.
	ErrBadValue       = errors.New("bad value")
	// ErrMultisigFormat is returned (wrapped) when a script does not
	// parse as a multisig program.
	ErrMultisigFormat = errors.New("bad multisig program format")
)
+
+func IsUnspendable(prog []byte) bool {
+       return len(prog) > 0 && prog[0] == byte(vm.OP_FAIL)
+}
+
+// BlockMultiSigProgram returns a valid multisignature consensus
+// program where nrequired of the keys in pubkeys are required to have
+// signed the block for success.  An ErrBadValue will be returned if
+// nrequired is larger than the number of keys provided.  The result
+// is: BLOCKHASH <pubkey>... <nrequired> <npubkeys> CHECKMULTISIG
+func BlockMultiSigProgram(pubkeys []ed25519.PublicKey, nrequired int) ([]byte, error) {
+       err := checkMultiSigParams(int64(nrequired), int64(len(pubkeys)))
+       if err != nil {
+               return nil, err
+       }
+       builder := NewBuilder()
+       builder.AddOp(vm.OP_BLOCKHASH)
+       for _, key := range pubkeys {
+               builder.AddData(key)
+       }
+       builder.AddInt64(int64(nrequired)).AddInt64(int64(len(pubkeys))).AddOp(vm.OP_CHECKMULTISIG)
+       return builder.Build()
+}
+
+func ParseBlockMultiSigProgram(script []byte) ([]ed25519.PublicKey, int, error) {
+       pops, err := vm.ParseProgram(script)
+       if err != nil {
+               return nil, 0, err
+       }
+       if len(pops) < 4 {
+               return nil, 0, vm.ErrShortProgram
+       }
+       if pops[len(pops)-1].Op != vm.OP_CHECKMULTISIG {
+               return nil, 0, errors.Wrap(ErrMultisigFormat, "no OP_CHECKMULTISIG")
+       }
+       npubkeys, err := vm.AsInt64(pops[len(pops)-2].Data)
+       if err != nil {
+               return nil, 0, errors.Wrap(ErrMultisigFormat, "parsing npubkeys")
+       }
+       if int(npubkeys) != len(pops)-4 {
+               return nil, 0, vm.ErrShortProgram
+       }
+       nrequired, err := vm.AsInt64(pops[len(pops)-3].Data)
+       if err != nil {
+               return nil, 0, errors.Wrap(ErrMultisigFormat, "parsing nrequired")
+       }
+       err = checkMultiSigParams(nrequired, npubkeys)
+       if err != nil {
+               return nil, 0, err
+       }
+
+       firstPubkeyIndex := len(pops) - 3 - int(npubkeys)
+
+       pubkeys := make([]ed25519.PublicKey, 0, npubkeys)
+       for i := firstPubkeyIndex; i < firstPubkeyIndex+int(npubkeys); i++ {
+               if len(pops[i].Data) != ed25519.PublicKeySize {
+                       return nil, 0, err
+               }
+               pubkeys = append(pubkeys, ed25519.PublicKey(pops[i].Data))
+       }
+       return pubkeys, int(nrequired), nil
+}
+
+func P2SPMultiSigProgram(pubkeys []ed25519.PublicKey, nrequired int) ([]byte, error) {
+       err := checkMultiSigParams(int64(nrequired), int64(len(pubkeys)))
+       if err != nil {
+               return nil, err
+       }
+       builder := NewBuilder()
+       // Expected stack: [... NARGS SIG SIG SIG PREDICATE]
+       // Number of sigs must match nrequired.
+       builder.AddOp(vm.OP_DUP).AddOp(vm.OP_TOALTSTACK) // stash a copy of the predicate
+       builder.AddOp(vm.OP_SHA3)                        // stack is now [... NARGS SIG SIG SIG PREDICATEHASH]
+       for _, p := range pubkeys {
+               builder.AddData(p)
+       }
+       builder.AddInt64(int64(nrequired))                     // stack is now [... SIG SIG SIG PREDICATEHASH PUB PUB PUB M]
+       builder.AddInt64(int64(len(pubkeys)))                  // stack is now [... SIG SIG SIG PREDICATEHASH PUB PUB PUB M N]
+       builder.AddOp(vm.OP_CHECKMULTISIG).AddOp(vm.OP_VERIFY) // stack is now [... NARGS]
+       builder.AddOp(vm.OP_FROMALTSTACK)                      // stack is now [... NARGS PREDICATE]
+       builder.AddInt64(0).AddOp(vm.OP_CHECKPREDICATE)
+       return builder.Build()
+}
+
+func ParseP2SPMultiSigProgram(program []byte) ([]ed25519.PublicKey, int, error) {
+       pops, err := vm.ParseProgram(program)
+       if err != nil {
+               return nil, 0, err
+       }
+       if len(pops) < 11 {
+               return nil, 0, vm.ErrShortProgram
+       }
+
+       // Count all instructions backwards from the end in case there are
+       // extra instructions at the beginning of the program (like a
+       // <pushdata> DROP).
+
+       npubkeys, err := vm.AsInt64(pops[len(pops)-6].Data)
+       if err != nil {
+               return nil, 0, err
+       }
+       if int(npubkeys) > len(pops)-10 {
+               return nil, 0, vm.ErrShortProgram
+       }
+       nrequired, err := vm.AsInt64(pops[len(pops)-7].Data)
+       if err != nil {
+               return nil, 0, err
+       }
+       err = checkMultiSigParams(nrequired, npubkeys)
+       if err != nil {
+               return nil, 0, err
+       }
+
+       firstPubkeyIndex := len(pops) - 7 - int(npubkeys)
+
+       pubkeys := make([]ed25519.PublicKey, 0, npubkeys)
+       for i := firstPubkeyIndex; i < firstPubkeyIndex+int(npubkeys); i++ {
+               if len(pops[i].Data) != ed25519.PublicKeySize {
+                       return nil, 0, err
+               }
+               pubkeys = append(pubkeys, ed25519.PublicKey(pops[i].Data))
+       }
+       return pubkeys, int(nrequired), nil
+}
+
+func checkMultiSigParams(nrequired, npubkeys int64) error {
+       if nrequired < 0 {
+               return errors.WithDetail(ErrBadValue, "negative quorum")
+       }
+       if npubkeys < 0 {
+               return errors.WithDetail(ErrBadValue, "negative pubkey count")
+       }
+       if nrequired > npubkeys {
+               return errors.WithDetail(ErrBadValue, "quorum too big")
+       }
+       if nrequired == 0 && npubkeys > 0 {
+               return errors.WithDetail(ErrBadValue, "quorum empty with non-empty pubkey list")
+       }
+       return nil
+}
diff --git a/protocol/vm/vmutil/script_test.go b/protocol/vm/vmutil/script_test.go
new file mode 100644 (file)
index 0000000..278f806
--- /dev/null
@@ -0,0 +1,110 @@
+package vmutil
+
+import (
+       "bytes"
+       "testing"
+
+       "chain/crypto/ed25519"
+)
+
+// TestIsUnspendable ensures the IsUnspendable function returns the expected
+// results.
+func TestIsUnspendable(t *testing.T) {
+       tests := []struct {
+               pkScript []byte
+               expected bool
+       }{
+               {
+                       // Unspendable
+                       pkScript: []byte{0x6a, 0x04, 0x74, 0x65, 0x73, 0x74},
+                       expected: true,
+               },
+               {
+                       // Spendable
+                       pkScript: []byte{0x76, 0xa9, 0x14, 0x29, 0x95, 0xa0,
+                               0xfe, 0x68, 0x43, 0xfa, 0x9b, 0x95, 0x45,
+                               0x97, 0xf0, 0xdc, 0xa7, 0xa4, 0x4d, 0xf6,
+                               0xfa, 0x0b, 0x5c, 0x88, 0xac},
+                       expected: false,
+               },
+       }
+
+       for i, test := range tests {
+               res := IsUnspendable(test.pkScript)
+               if res != test.expected {
+                       t.Errorf("TestIsUnspendable #%d failed: got %v want %v",
+                               i, res, test.expected)
+                       continue
+               }
+       }
+}
+
+func Test00Multisig(t *testing.T) {
+       prog, err := BlockMultiSigProgram(nil, 0)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(prog) < 1 {
+               t.Fatal("BlockMultiSigScript(0, 0) = {} want script")
+       }
+}
+
+func Test01Multisig(t *testing.T) {
+       pubkeys := []ed25519.PublicKey{{}}
+       _, err := BlockMultiSigProgram(pubkeys, 0)
+       if err == nil {
+               t.Fatal("BlockMultiSigScript(1, 0) = success want error")
+       }
+}
+
+func TestParse00Multisig(t *testing.T) {
+       prog, err := BlockMultiSigProgram(nil, 0)
+       if err != nil {
+               t.Fatal(err)
+       }
+       keys, quorum, err := ParseBlockMultiSigProgram(prog)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if len(keys) != 0 || quorum != 0 {
+               t.Fatalf("ParseBlockMultiSigScript(%x) = (%v, %d) want (nil, 0)", prog, keys, quorum)
+       }
+}
+
+func TestP2SP(t *testing.T) {
+       pub1, _, _ := ed25519.GenerateKey(nil)
+       pub2, _, _ := ed25519.GenerateKey(nil)
+       prog, _ := P2SPMultiSigProgram([]ed25519.PublicKey{pub1, pub2}, 1)
+       pubs, n, err := ParseP2SPMultiSigProgram(prog)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if n != 1 {
+               t.Errorf("expected nrequired=1, got %d", n)
+       }
+       if !bytes.Equal(pubs[0], pub1) {
+               t.Errorf("expected first pubkey to be %x, got %x", pub1, pubs[0])
+       }
+       if !bytes.Equal(pubs[1], pub2) {
+               t.Errorf("expected second pubkey to be %x, got %x", pub2, pubs[1])
+       }
+}
+
+func TestBlockMultisig(t *testing.T) {
+       pub1, _, _ := ed25519.GenerateKey(nil)
+       pub2, _, _ := ed25519.GenerateKey(nil)
+       prog, _ := BlockMultiSigProgram([]ed25519.PublicKey{pub1, pub2}, 1)
+       pubs, n, err := ParseBlockMultiSigProgram(prog)
+       if err != nil {
+               t.Fatal(err)
+       }
+       if n != 1 {
+               t.Errorf("expected nrequired=1, got %d", n)
+       }
+       if !bytes.Equal(pubs[0], pub1) {
+               t.Errorf("expected first pubkey to be %x, got %x", pub1, pubs[0])
+       }
+       if !bytes.Equal(pubs[1], pub2) {
+               t.Errorf("expected second pubkey to be %x, got %x", pub2, pubs[1])
+       }
+}