1 // Copyright (c) 2015-2016 The btcsuite developers
2 // Use of this source code is governed by an ISC
3 // license that can be found in the LICENSE file.
15 "github.com/btcsuite/btcd/chaincfg/chainhash"
16 "github.com/btcsuite/btcd/database"
17 "github.com/btcsuite/btcd/wire"
18 "github.com/btcsuite/btcutil"
21 // importCmd defines the configuration options for the insecureimport command.
22 type importCmd struct {
// InFile is the path of the file containing the serialized block(s) to
// import.
23 InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
// Progress is the number of seconds between progress log messages; a
// value of 0 disables progress announcements.
24 Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
28 // importCfg defines the configuration options for the command.
29 importCfg = importCmd{
// Default input file used when -i/--infile is not specified.
30 InFile: "bootstrap.dat",
34 // zeroHash is simply a hash with all zeros. It is defined here to
35 // avoid creating it multiple times.
36 zeroHash = chainhash.Hash{}
39 // importResults houses the stats and result of an import operation.
// NOTE(review): field list not visible here — usage elsewhere in this file
// shows at least blocksProcessed, blocksImported, and err.
40 type importResults struct {
46 // blockImporter houses information about an ongoing import from a block data
47 // file to the block database.
48 type blockImporter struct {
// processQueue carries serialized blocks from the read goroutine to the
// processing goroutine.
51 processQueue chan []byte
// receivedLogBlocks counts blocks seen since the last progress log
// message.
58 receivedLogBlocks int64
// lastBlockTime is the header timestamp of the most recently processed
// block.
61 lastBlockTime time.Time
65 // readBlock reads the next block from the input file.
//
// It returns the serialized block bytes on success, or a nil slice with a
// nil error once the end of the input has been reached.
66 func (bi *blockImporter) readBlock() ([]byte, error) {
67 // The block file format is:
68 // <network> <block length> <serialized block>
// Read the network magic and ensure it matches the active network.
70 err := binary.Read(bi.r, binary.LittleEndian, &net)
76 // No block and no error means there are no more blocks to read.
79 if net != uint32(activeNetParams.Net) {
80 return nil, fmt.Errorf("network mismatch -- got %x, want %x",
81 net, uint32(activeNetParams.Net))
84 // Read the block length and ensure it is sane.
86 if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
// Reject payloads larger than the protocol maximum to avoid huge
// allocations from a corrupt or malicious input file.
89 if blockLen > wire.MaxBlockPayload {
90 return nil, fmt.Errorf("block payload of %d bytes is larger "+
91 "than the max allowed %d bytes", blockLen,
// Read exactly blockLen bytes of serialized block data.
95 serializedBlock := make([]byte, blockLen)
96 if _, err := io.ReadFull(bi.r, serializedBlock); err != nil {
100 return serializedBlock, nil
103 // processBlock potentially imports the block into the database. It first
104 // deserializes the raw block while checking for errors. Already known blocks
105 // are skipped and orphan blocks are considered errors. Returns whether the
106 // block was imported along with any potential errors.
108 // NOTE: This is not a safe import as it does not verify chain rules.
109 func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
110 // Deserialize the block which includes checks for malformed blocks.
111 block, err := btcutil.NewBlockFromBytes(serializedBlock)
116 // update progress statistics
117 bi.lastBlockTime = block.MsgBlock().Header.Timestamp
118 bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))
120 // Skip blocks that already exist.
122 err = bi.db.View(func(tx database.Tx) error {
123 exists, err = tx.HasBlock(block.Hash())
133 // Don't bother trying to process orphans.
134 prevHash := &block.MsgBlock().Header.PrevBlock
// A zero previous hash marks the genesis block, which has no parent to
// look up, so only non-genesis blocks need the parent-existence check.
135 if !prevHash.IsEqual(&zeroHash) {
// Ensure the parent block is already in the database; otherwise the
// import file does not link to the available chain.
137 err := bi.db.View(func(tx database.Tx) error {
138 exists, err = tx.HasBlock(prevHash)
145 return false, fmt.Errorf("import file contains block "+
146 "%v which does not link to the available "+
147 "block chain", prevHash)
151 // Put the blocks into the database with no checking of chain rules.
152 err = bi.db.Update(func(tx database.Tx) error {
153 return tx.StoreBlock(block)
162 // readHandler is the main handler for reading blocks from the import file.
163 // This allows block processing to take place in parallel with block reads.
164 // It must be run as a goroutine.
165 func (bi *blockImporter) readHandler() {
168 // Read the next block from the file and if anything goes wrong
169 // notify the status handler with the error and bail.
170 serializedBlock, err := bi.readBlock()
172 bi.errChan <- fmt.Errorf("Error reading from input "+
173 "file: %v", err.Error())
177 // A nil block with no error means we're done.
178 if serializedBlock == nil {
182 // Send the block or quit if we've been signalled to exit by
183 // the status handler due to an error elsewhere.
185 case bi.processQueue <- serializedBlock:
191 // Close the processing channel to signal no more blocks are coming.
// This goroutine is the only sender on processQueue, so closing here is
// safe and lets processHandler detect completion.
192 close(bi.processQueue)
196 // logProgress logs block progress as an information message. In order to
197 // prevent spam, it limits logging to one message every importCfg.Progress
198 // seconds with duration and totals included.
199 func (bi *blockImporter) logProgress() {
200 bi.receivedLogBlocks++
// Only emit a message once the configured interval has elapsed since
// the previous one.
203 duration := now.Sub(bi.lastLogTime)
204 if duration < time.Second*time.Duration(importCfg.Progress) {
208 // Truncate the duration to 10s of milliseconds.
209 durationMillis := int64(duration / time.Millisecond)
210 tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
212 // Log information about new block height.
// Pick singular/plural word forms to match the counts being reported.
214 if bi.receivedLogBlocks == 1 {
217 txStr := "transactions"
218 if bi.receivedLogTx == 1 {
219 txStr = "transaction"
221 log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
222 bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx,
223 txStr, bi.lastHeight, bi.lastBlockTime)
// Reset the per-interval counters for the next progress window.
225 bi.receivedLogBlocks = 0
230 // processHandler is the main handler for processing blocks. This allows block
231 // processing to take place in parallel with block reads from the import file.
232 // It must be run as a goroutine.
233 func (bi *blockImporter) processHandler() {
// Receive blocks queued by readHandler; ok reports whether the channel
// is still open.
237 case serializedBlock, ok := <-bi.processQueue:
238 // We're done when the channel is closed.
245 imported, err := bi.processBlock(serializedBlock)
264 // statusHandler waits for updates from the import operation and notifies
265 // the passed doneChan with the results of the import. It also causes all
266 // goroutines to exit if an error is reported from any of them.
267 func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
269 // An error from either of the goroutines means we're done so signal
270 // caller with the error and signal all goroutines to quit.
271 case err := <-bi.errChan:
272 resultsChan <- &importResults{
273 blocksProcessed: bi.blocksProcessed,
274 blocksImported: bi.blocksImported,
279 // The import finished normally.
281 resultsChan <- &importResults{
282 blocksProcessed: bi.blocksProcessed,
283 blocksImported: bi.blocksImported,
289 // Import is the core function which handles importing the blocks from the file
290 // associated with the block importer to the database. It returns a channel
291 // on which the results will be returned when the operation has completed.
292 func (bi *blockImporter) Import() chan *importResults {
293 // Start up the read and process handling goroutines. This setup allows
294 // blocks to be read from disk in parallel while being processed.
297 go bi.processHandler()
299 // Wait for the import to finish in a separate goroutine and signal
300 // the status handler when done.
306 // Start the status handler and return the result channel that it will
307 // send the results on when the import is done.
308 resultChan := make(chan *importResults)
309 go bi.statusHandler(resultChan)
313 // newBlockImporter returns a new importer for the provided file reader seeker
315 func newBlockImporter(db database.DB, r io.ReadSeeker) *blockImporter {
316 return &blockImporter{
// Small buffer lets reading stay slightly ahead of processing.
319 processQueue: make(chan []byte, 2),
320 doneChan: make(chan bool),
321 errChan: make(chan error),
322 quit: make(chan struct{}),
// Seed the progress timer so the first interval is measured from now.
323 lastLogTime: time.Now(),
327 // Execute is the main entry point for the command. It's invoked by the parser.
328 func (cmd *importCmd) Execute(args []string) error {
329 // Setup the global config options and ensure they are valid.
330 if err := setupGlobalConfig(); err != nil {
334 // Ensure the specified block file exists.
335 if !fileExists(cmd.InFile) {
336 str := "The specified block file [%v] does not exist"
337 return fmt.Errorf(str, cmd.InFile)
340 // Load the block database.
341 db, err := loadBlockDB()
347 // Ensure the database is sync'd and closed on Ctrl+C.
348 addInterruptHandler(func() {
349 log.Infof("Gracefully shutting down the database...")
// Open the input file containing the blocks to import.
353 fi, err := os.Open(importCfg.InFile)
359 // Create a block importer for the database and input file and start it.
360 // The results channel returned from start will contain an error if
361 // anything went wrong.
362 importer := newBlockImporter(db, fi)
364 // Perform the import asynchronously and signal the main goroutine when
365 // done. This allows blocks to be processed and read in parallel. The
366 // results channel returned from Import contains the statistics about
367 // the import including an error if something went wrong. This is done
368 // in a separate goroutine rather than waiting directly so the main
369 // goroutine can be signaled for shutdown by either completion, error,
370 // or from the main interrupt handler. This is necessary since the main
371 // goroutine must be kept running long enough for the interrupt handler
372 // goroutine to finish.
374 log.Info("Starting import")
375 resultsChan := importer.Import()
376 results := <-resultsChan
377 if results.err != nil {
// A database-not-open error is expected when shutdown was already
// triggered by the interrupt handler, so only propagate other errors.
378 dbErr, ok := results.err.(database.Error)
379 if !ok || ok && dbErr.ErrorCode != database.ErrDbNotOpen {
380 shutdownChannel <- results.err
385 log.Infof("Processed a total of %d blocks (%d imported, %d "+
386 "already known)", results.blocksProcessed,
387 results.blocksImported,
388 results.blocksProcessed-results.blocksImported)
389 shutdownChannel <- nil
392 // Wait for shutdown signal from either a normal completion or from the
393 // interrupt handler.
394 err = <-shutdownChannel