import (
"encoding/binary"
- "encoding/hex"
"encoding/json"
"time"
return GetBlockHashesByHeight(db, height)
}
- cache := newCache(fillBlockHeaderFn, fillBlockTxsFn, fillBlockHashesFn)
+ fillMainChainHashFn := func(height uint64) (*bc.Hash, error) {
+ return GetMainChainHash(db, height)
+ }
+
+ cache := newCache(fillBlockHeaderFn, fillBlockTxsFn, fillBlockHashesFn, fillMainChainHashFn)
return &Store{
db: db,
cache: cache,
return s.cache.lookupBlockHashesByHeight(height)
}
+// GetMainChainHash returns the block hash of the main chain at the specified height.
+func (s *Store) GetMainChainHash(height uint64) (*bc.Hash, error) {
+ return s.cache.lookupMainChainHash(height)
+}
+
// SaveBlock persists a new block in the protocol.
func (s *Store) SaveBlock(block *types.Block) error {
startTime := time.Now()
return loadBlockStoreStateJSON(s.db)
}
-// LoadBlockIndex loadblockIndex by bestHeight
-func (s *Store) LoadBlockIndex(stateBestHeight uint64) (*state.BlockIndex, error) {
- startTime := time.Now()
- blockIndex := state.NewBlockIndex()
- bhIter := s.db.IteratorPrefix(BlockHeaderIndexPrefix)
- defer bhIter.Release()
-
- var lastNode *state.BlockNode
- for bhIter.Next() {
- bh := &types.BlockHeader{}
- if err := bh.UnmarshalText(bhIter.Value()); err != nil {
- return nil, err
- }
-
- // If a block with a height greater than the best height of state is added to the index,
- // It may cause a bug that the new block cant not be process properly.
- if bh.Height > stateBestHeight {
- break
- }
-
- var parent *state.BlockNode
- if lastNode == nil || lastNode.Hash == bh.PreviousBlockHash {
- parent = lastNode
- } else {
- parent = blockIndex.GetNode(&bh.PreviousBlockHash)
- }
-
- node, err := state.NewBlockNode(bh, parent)
- if err != nil {
- return nil, err
- }
-
- blockIndex.AddNode(node)
- lastNode = node
- }
-
- log.WithFields(log.Fields{
- "module": logModule,
- "height": stateBestHeight,
- "duration": time.Since(startTime),
- }).Debug("initialize load history block index from database")
- return blockIndex, nil
-}
-
// SaveChainStatus save the core's newest status && delete old status
-func (s *Store) SaveChainStatus(node *state.BlockNode, view *state.UtxoViewpoint, contractView *state.ContractViewpoint, finalizedHeight uint64, finalizedHash *bc.Hash) error {
+func (s *Store) SaveChainStatus(blockHeader *types.BlockHeader, mainBlockHeaders []*types.BlockHeader, view *state.UtxoViewpoint, contractView *state.ContractViewpoint, finalizedHeight uint64, finalizedHash *bc.Hash) error {
batch := s.db.NewBatch()
if err := saveUtxoView(batch, view); err != nil {
return err
return err
}
- bytes, err := json.Marshal(protocol.BlockStoreState{Height: node.Height, Hash: &node.Hash, FinalizedHeight: finalizedHeight, FinalizedHash: finalizedHash})
+ blockHeaderHash := blockHeader.Hash()
+ bytes, err := json.Marshal(
+ protocol.BlockStoreState{
+ Height: blockHeader.Height,
+ Hash: &blockHeaderHash,
+ FinalizedHeight: finalizedHeight,
+ FinalizedHash: finalizedHash,
+ })
if err != nil {
return err
}
batch.Set(BlockStoreKey, bytes)
+
+ var clearCacheFuncs []func()
+ // save main chain blockHeaders
+ for _, blockHeader := range mainBlockHeaders {
+ bh := blockHeader
+ blockHash := bh.Hash()
+ binaryBlockHash, err := blockHash.MarshalText()
+ if err != nil {
+ return errors.Wrap(err, "Marshal block hash")
+ }
+
+ batch.Set(calcMainChainIndexPrefix(bh.Height), binaryBlockHash)
+ clearCacheFuncs = append(clearCacheFuncs, func() {
+ s.cache.removeMainChainHash(bh.Height)
+ })
+ }
batch.Write()
+ for _, clearCacheFunc := range clearCacheFuncs {
+ clearCacheFunc()
+ }
+
return nil
}
return nil, err
}
- setSupLinkToCheckpoint(checkpoint, header.SupLinks)
+ checkpoint.SupLinks = append(checkpoint.SupLinks, header.SupLinks...)
return checkpoint, nil
}
return nil, err
}
- setSupLinkToCheckpoint(checkpoint, header.SupLinks)
+ checkpoint.SupLinks = append(checkpoint.SupLinks, header.SupLinks...)
checkpoints = append(checkpoints, checkpoint)
}
return checkpoints, nil
return err
}
- if checkpoint.Height%state.BlocksOfEpoch != 1 {
+ if checkpoint.Height%consensus.ActiveNetParams.BlocksOfEpoch != 1 {
header, err := s.GetBlockHeader(&checkpoint.Hash)
if err != nil {
return err
batch.Set(calcCheckpointKey(checkpoint.Height, &checkpoint.Hash), data)
log.WithFields(log.Fields{
- "module": logModule,
- "height": checkpoint.Height,
- "hash": checkpoint.Hash.String(),
- "status": checkpoint.Status,
+ "module": logModule,
+ "height": checkpoint.Height,
+ "hash": checkpoint.Hash.String(),
+ "status": checkpoint.Status,
"duration": time.Since(startTime),
}).Info("checkpoint saved on disk")
}
return nil
}
-
-func setSupLinkToCheckpoint(c *state.Checkpoint, supLinks types.SupLinks) {
- for _, supLink := range supLinks {
- var signatures [consensus.MaxNumOfValidators]string
- for i, signature := range supLink.Signatures {
- signatures[i] = hex.EncodeToString(signature)
- }
-
- c.SupLinks = append(c.SupLinks, &state.SupLink{
- SourceHeight: supLink.SourceHeight,
- SourceHash: supLink.SourceHash,
- Signatures: signatures,
- })
- }
-}