9 log "github.com/sirupsen/logrus"
10 "github.com/tendermint/tmlibs/common"
12 "github.com/bytom/bytom/consensus"
13 dbm "github.com/bytom/bytom/database/leveldb"
14 "github.com/bytom/bytom/database/storage"
15 "github.com/bytom/bytom/errors"
16 "github.com/bytom/bytom/protocol"
17 "github.com/bytom/bytom/protocol/bc"
18 "github.com/bytom/bytom/protocol/bc/types"
19 "github.com/bytom/bytom/protocol/state"
// logModule is the module tag attached to log entries emitted by this package.
const logModule = "leveldb"

var (
	// CheckpointPrefix represent the namespace of checkpoints in db
	CheckpointPrefix = []byte("CP:")
	// BlockStoreKey block store key
	BlockStoreKey = []byte("blockStore")
	// BlockHeaderIndexPrefix block header index with height
	BlockHeaderIndexPrefix = []byte("BH:")
)
33 func loadBlockStoreStateJSON(db dbm.DB) *protocol.BlockStoreState {
34 bytes := db.Get(BlockStoreKey)
38 bsj := &protocol.BlockStoreState{}
39 if err := json.Unmarshal(bytes, bsj); err != nil {
40 common.PanicCrisis(common.Fmt("Could not unmarshal bytes: %X", bytes))
45 // A Store encapsulates storage for blockchain validation.
46 // It satisfies the interface protocol.Store, and provides additional
47 // methods for querying current data.
53 // NewStore creates and returns a new Store object.
54 func NewStore(db dbm.DB) *Store {
55 fillBlockHeaderFn := func(hash *bc.Hash) (*types.BlockHeader, error) {
56 return GetBlockHeader(db, hash)
59 fillBlockTxsFn := func(hash *bc.Hash) ([]*types.Tx, error) {
60 return GetBlockTransactions(db, hash)
63 fillBlockHashesFn := func(height uint64) ([]*bc.Hash, error) {
64 return GetBlockHashesByHeight(db, height)
67 fillMainChainHashFn := func(height uint64) (*bc.Hash, error) {
68 return GetMainChainHash(db, height)
71 cache := newCache(fillBlockHeaderFn, fillBlockTxsFn, fillBlockHashesFn, fillMainChainHashFn)
78 // GetBlockHeader return the BlockHeader by given hash
79 func (s *Store) GetBlockHeader(hash *bc.Hash) (*types.BlockHeader, error) {
80 return s.cache.lookupBlockHeader(hash)
83 // GetUtxo will search the utxo in db
84 func (s *Store) GetUtxo(hash *bc.Hash) (*storage.UtxoEntry, error) {
85 return getUtxo(s.db, hash)
88 func (s *Store) GetContract(hash [32]byte) ([]byte, error) {
89 return getContract(s.db, hash)
92 // BlockExist check if the block is stored in disk
93 func (s *Store) BlockExist(hash *bc.Hash) bool {
94 _, err := s.cache.lookupBlockHeader(hash)
98 // SaveBlockHeader persists a new block header in the protocol.
99 func (s *Store) SaveBlockHeader(blockHeader *types.BlockHeader) error {
100 binaryBlockHeader, err := blockHeader.MarshalText()
102 return errors.Wrap(err, "Marshal block header")
105 blockHash := blockHeader.Hash()
106 s.db.Set(CalcBlockHeaderKey(&blockHash), binaryBlockHeader)
107 s.cache.removeBlockHeader(blockHeader)
111 // GetBlockHashesByHeight return the block hash by the specified height
112 func (s *Store) GetBlockHashesByHeight(height uint64) ([]*bc.Hash, error) {
113 return s.cache.lookupBlockHashesByHeight(height)
116 // GetMainChainHash return the block hash by the specified height
117 func (s *Store) GetMainChainHash(height uint64) (*bc.Hash, error) {
118 return s.cache.lookupMainChainHash(height)
121 // SaveBlock persists a new block in the protocol.
122 func (s *Store) SaveBlock(block *types.Block) error {
123 startTime := time.Now()
124 binaryBlockHeader, err := block.MarshalTextForBlockHeader()
126 return errors.Wrap(err, "Marshal block header")
129 binaryBlockTxs, err := block.MarshalTextForTransactions()
131 return errors.Wrap(err, "Marshal block transactions")
134 blockHashes := []*bc.Hash{}
135 hashes, err := s.GetBlockHashesByHeight(block.Height)
140 blockHashes = append(blockHashes, hashes...)
141 blockHash := block.Hash()
142 blockHashes = append(blockHashes, &blockHash)
143 binaryBlockHashes, err := json.Marshal(blockHashes)
145 return errors.Wrap(err, "Marshal block hashes")
148 batch := s.db.NewBatch()
149 batch.Set(CalcBlockHashesKey(block.Height), binaryBlockHashes)
150 batch.Set(CalcBlockHeaderKey(&blockHash), binaryBlockHeader)
151 batch.Set(CalcBlockTransactionsKey(&blockHash), binaryBlockTxs)
152 batch.Set(CalcBlockHeaderIndexKey(block.Height, &blockHash), binaryBlockHeader)
155 s.cache.removeBlockHashes(block.Height)
156 log.WithFields(log.Fields{
158 "height": block.Height,
159 "hash": blockHash.String(),
160 "duration": time.Since(startTime),
161 }).Info("block saved on disk")
165 // GetBlockTransactions return the Block transactions by given hash
166 func (s *Store) GetBlockTransactions(hash *bc.Hash) ([]*types.Tx, error) {
167 return s.cache.lookupBlockTxs(hash)
170 // GetBlock return the block by given hash
171 func (s *Store) GetBlock(hash *bc.Hash) (*types.Block, error) {
172 blockHeader, err := s.GetBlockHeader(hash)
177 txs, err := s.GetBlockTransactions(hash)
183 BlockHeader: *blockHeader,
188 // GetTransactionsUtxo will return all the utxo that related to the input txs
189 func (s *Store) GetTransactionsUtxo(view *state.UtxoViewpoint, txs []*bc.Tx) error {
190 return getTransactionsUtxo(s.db, view, txs)
193 // GetStoreStatus return the BlockStoreStateJSON
194 func (s *Store) GetStoreStatus() *protocol.BlockStoreState {
195 return loadBlockStoreStateJSON(s.db)
198 // SaveChainStatus save the core's newest status && delete old status
199 func (s *Store) SaveChainStatus(blockHeader *types.BlockHeader, mainBlockHeaders []*types.BlockHeader, view *state.UtxoViewpoint, contractView *state.ContractViewpoint, finalizedHeight uint64, finalizedHash *bc.Hash) error {
200 batch := s.db.NewBatch()
201 if err := saveUtxoView(batch, view); err != nil {
205 if err := deleteContractView(s.db, batch, contractView); err != nil {
209 if err := saveContractView(s.db, batch, contractView); err != nil {
213 blockHeaderHash := blockHeader.Hash()
214 bytes, err := json.Marshal(
215 protocol.BlockStoreState{
216 Height: blockHeader.Height,
217 Hash: &blockHeaderHash,
218 FinalizedHeight: finalizedHeight,
219 FinalizedHash: finalizedHash,
225 batch.Set(BlockStoreKey, bytes)
227 var clearCacheFuncs []func()
228 // save main chain blockHeaders
229 for _, blockHeader := range mainBlockHeaders {
231 blockHash := bh.Hash()
232 binaryBlockHash, err := blockHash.MarshalText()
234 return errors.Wrap(err, "Marshal block hash")
237 batch.Set(calcMainChainIndexPrefix(bh.Height), binaryBlockHash)
238 clearCacheFuncs = append(clearCacheFuncs, func() {
239 s.cache.removeMainChainHash(bh.Height)
243 for _, clearCacheFunc := range clearCacheFuncs {
250 func calcCheckpointKey(height uint64, hash *bc.Hash) []byte {
251 buf := make([]byte, 8)
252 binary.BigEndian.PutUint64(buf, height)
253 key := append(CheckpointPrefix, buf...)
255 key = append(key, hash.Bytes()...)
260 func (s *Store) GetCheckpoint(hash *bc.Hash) (*state.Checkpoint, error) {
261 header, err := s.GetBlockHeader(hash)
266 data := s.db.Get(calcCheckpointKey(header.Height, hash))
267 checkpoint := &state.Checkpoint{}
268 if err := json.Unmarshal(data, checkpoint); err != nil {
272 setSupLinkToCheckpoint(checkpoint, header.SupLinks)
273 return checkpoint, nil
276 // GetCheckpointsByHeight return all checkpoints of specified block height
277 func (s *Store) GetCheckpointsByHeight(height uint64) ([]*state.Checkpoint, error) {
278 iter := s.db.IteratorPrefix(calcCheckpointKey(height, nil))
280 return s.loadCheckpointsFromIter(iter)
283 // CheckpointsFromNode return all checkpoints from specified block height and hash
284 func (s *Store) CheckpointsFromNode(height uint64, hash *bc.Hash) ([]*state.Checkpoint, error) {
285 startKey := calcCheckpointKey(height, hash)
286 iter := s.db.IteratorPrefixWithStart(CheckpointPrefix, startKey, false)
288 firstCheckpoint := &state.Checkpoint{}
289 if err := json.Unmarshal(iter.Value(), firstCheckpoint); err != nil {
293 checkpoints := []*state.Checkpoint{firstCheckpoint}
294 subs, err := s.loadCheckpointsFromIter(iter)
299 checkpoints = append(checkpoints, subs...)
300 return checkpoints, nil
303 func (s *Store) loadCheckpointsFromIter(iter dbm.Iterator) ([]*state.Checkpoint, error) {
304 var checkpoints []*state.Checkpoint
307 checkpoint := &state.Checkpoint{}
308 if err := json.Unmarshal(iter.Value(), checkpoint); err != nil {
312 header, err := s.GetBlockHeader(&checkpoint.Hash)
317 setSupLinkToCheckpoint(checkpoint, header.SupLinks)
318 checkpoints = append(checkpoints, checkpoint)
320 return checkpoints, nil
323 // SaveCheckpoints bulk save multiple checkpoint
324 func (s *Store) SaveCheckpoints(checkpoints []*state.Checkpoint) error {
325 batch := s.db.NewBatch()
327 if err := s.saveCheckpoints(batch, checkpoints); err != nil {
335 func (s *Store) saveCheckpoints(batch dbm.Batch, checkpoints []*state.Checkpoint) error {
336 for _, checkpoint := range checkpoints {
337 startTime := time.Now()
338 data, err := json.Marshal(checkpoint)
343 if checkpoint.Height%consensus.ActiveNetParams.BlocksOfEpoch != 1 {
344 header, err := s.GetBlockHeader(&checkpoint.Hash)
349 batch.Delete(calcCheckpointKey(header.Height-1, &header.PreviousBlockHash))
352 batch.Set(calcCheckpointKey(checkpoint.Height, &checkpoint.Hash), data)
353 log.WithFields(log.Fields{
355 "height": checkpoint.Height,
356 "hash": checkpoint.Hash.String(),
357 "status": checkpoint.Status,
358 "duration": time.Since(startTime),
359 }).Info("checkpoint saved on disk")
364 func setSupLinkToCheckpoint(c *state.Checkpoint, supLinks types.SupLinks) {
365 for _, supLink := range supLinks {
366 var signatures [consensus.MaxNumOfValidators]string
367 for i, signature := range supLink.Signatures {
368 signatures[i] = hex.EncodeToString(signature)
371 c.SupLinks = append(c.SupLinks, &state.SupLink{
372 SourceHeight: supLink.SourceHeight,
373 SourceHash: supLink.SourceHash,
374 Signatures: signatures,