From: jeason Date: Tue, 1 Aug 2017 07:53:19 +0000 (-0700) Subject: add pow interface solveblock X-Git-Tag: v1.0.5~511^2~6 X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=5ad47858966aa7ae7e9d4d6eb1aa168d2962042c;p=bytom%2Fbytom.git add pow interface solveblock --- diff --git a/rpc/chainhash/chainhash.go b/rpc/chainhash/chainhash.go new file mode 100644 index 00000000..2b1cec02 --- /dev/null +++ b/rpc/chainhash/chainhash.go @@ -0,0 +1,128 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Copyright (c) 2015 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainhash + +import ( + "encoding/hex" + "fmt" +) + +// HashSize of array used to store hashes. See Hash. +const HashSize = 32 + +// MaxHashStringSize is the maximum length of a Hash hash string. +const MaxHashStringSize = HashSize * 2 + +// ErrHashStrSize describes an error that indicates the caller specified a hash +// string that has too many characters. +var ErrHashStrSize = fmt.Errorf("max hash string length is %v bytes", MaxHashStringSize) + +// Hash is used in several of the bitcoin messages and common structures. It +// typically represents the double sha256 of data. +type Hash [HashSize]byte + +// String returns the Hash as the hexadecimal string of the byte-reversed +// hash. +func (hash Hash) String() string { + for i := 0; i < HashSize/2; i++ { + hash[i], hash[HashSize-1-i] = hash[HashSize-1-i], hash[i] + } + return hex.EncodeToString(hash[:]) +} + +// CloneBytes returns a copy of the bytes which represent the hash as a byte +// slice. +// +// NOTE: It is generally cheaper to just slice the hash directly thereby reusing +// the same bytes rather than calling this method. +func (hash *Hash) CloneBytes() []byte { + newHash := make([]byte, HashSize) + copy(newHash, hash[:]) + + return newHash +} + +// SetBytes sets the bytes which represent the hash. An error is returned if +// the number of bytes passed in is not HashSize. +func (hash *Hash) SetBytes(newHash []byte) error { + nhlen := len(newHash) + if nhlen != HashSize { + return fmt.Errorf("invalid hash length of %v, want %v", nhlen, + HashSize) + } + copy(hash[:], newHash) + + return nil +} + +// IsEqual returns true if target is the same as hash. +func (hash *Hash) IsEqual(target *Hash) bool { + if hash == nil && target == nil { + return true + } + if hash == nil || target == nil { + return false + } + return *hash == *target +} + +// NewHash returns a new Hash from a byte slice. An error is returned if +// the number of bytes passed in is not HashSize. +func NewHash(newHash []byte) (*Hash, error) { + var sh Hash + err := sh.SetBytes(newHash) + if err != nil { + return nil, err + } + return &sh, err +} + +// NewHashFromStr creates a Hash from a hash string. The string should be +// the hexadecimal string of a byte-reversed hash, but any missing characters +// result in zero padding at the end of the Hash. +func NewHashFromStr(hash string) (*Hash, error) { + ret := new(Hash) + err := Decode(ret, hash) + if err != nil { + return nil, err + } + return ret, nil +} + +// Decode decodes the byte-reversed hexadecimal string encoding of a Hash to a +// destination. +func Decode(dst *Hash, src string) error { + // Return error if hash string is too long. + if len(src) > MaxHashStringSize { + return ErrHashStrSize + } + + // Hex decoder expects the hash to be a multiple of two. When not, pad + // with a leading zero. 
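+	// For example, the string "abc" is decoded as if it were "0abc".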
+ var srcBytes []byte + if len(src)%2 == 0 { + srcBytes = []byte(src) + } else { + srcBytes = make([]byte, 1+len(src)) + srcBytes[0] = '0' + copy(srcBytes[1:], src) + } + + // Hex decode the source bytes to a temporary destination. + var reversedHash Hash + _, err := hex.Decode(reversedHash[HashSize-hex.DecodedLen(len(srcBytes)):], srcBytes) + if err != nil { + return err + } + + // Reverse copy from the temporary hash to destination. Because the + // temporary was zeroed, the written result will be correctly padded. + for i, b := range reversedHash[:HashSize/2] { + dst[i], dst[HashSize-1-i] = reversedHash[HashSize-1-i], b + } + + return nil +} diff --git a/rpc/chainhash/hashfunc.go b/rpc/chainhash/hashfunc.go new file mode 100644 index 00000000..bf74f73c --- /dev/null +++ b/rpc/chainhash/hashfunc.go @@ -0,0 +1,33 @@ +// Copyright (c) 2015 The Decred developers +// Copyright (c) 2016-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package chainhash + +import "crypto/sha256" + +// HashB calculates hash(b) and returns the resulting bytes. +func HashB(b []byte) []byte { + hash := sha256.Sum256(b) + return hash[:] +} + +// HashH calculates hash(b) and returns the resulting bytes as a Hash. +func HashH(b []byte) Hash { + return Hash(sha256.Sum256(b)) +} + +// DoubleHashB calculates hash(hash(b)) and returns the resulting bytes. +func DoubleHashB(b []byte) []byte { + first := sha256.Sum256(b) + second := sha256.Sum256(first[:]) + return second[:] +} + +// DoubleHashH calculates hash(hash(b)) and returns the resulting bytes as a +// Hash. +func DoubleHashH(b []byte) Hash { + first := sha256.Sum256(b) + return Hash(sha256.Sum256(first[:])) +} diff --git a/rpc/core/generate.go b/rpc/core/generate.go index 0aff71d4..3210db5e 100644 --- a/rpc/core/generate.go +++ b/rpc/core/generate.go @@ -1,16 +1,66 @@ package core import ( - //"fmt" + //"errors" + "math" + "math/big" + "runtime" + //"time" - ctypes "github.com/blockchain/rpc/core/types" + //"github.com/blockchain/rpc/chainhash" + "github.com/blockchain/rpc/wire" ) -//compute nonce for hash -func solveBlock(header *ctypes.ResultBlockHeaderInfo) bool { - type sbResult struct { +func solveBlock(header *wire.BlockHeader, targetDifficulty *big.Int) bool { + // sbResult is used by the solver goroutines to send results. + type sbResult struct { found bool nonce uint32 } - return true + + // solver accepts a block header and a nonce range to test. It is + // intended to be run as a goroutine. + quit := make(chan bool) + results := make(chan sbResult) + solver := func(hdr wire.BlockHeader, startNonce, stopNonce uint32) { + // We need to modify the nonce field of the header, so make sure + // we work with a copy of the original header. 
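+		// The extra i >= startNonce condition below ends the loop if the
+		// uint32 nonce wraps around after testing stopNonce == math.MaxUint32.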
+ for i := startNonce; i >= startNonce && i <= stopNonce; i++ { + select { + case <-quit: + return + default: + hdr.Nonce = i + hash := hdr.BlockHash() + if wire.HashToBig(&hash).Cmp(targetDifficulty) <= 0 { + results <- sbResult{true, i} + return + } + } + } + results <- sbResult{false, 0} + } + + startNonce := uint32(1) + stopNonce := uint32(math.MaxUint32) + numCores := uint32(runtime.NumCPU()) + noncesPerCore := (stopNonce - startNonce) / numCores + for i := uint32(0); i < numCores; i++ { + rangeStart := startNonce + (noncesPerCore * i) + rangeStop := startNonce + (noncesPerCore * (i + 1)) - 1 + if i == numCores-1 { + rangeStop = stopNonce + } + go solver(*header, rangeStart, rangeStop) + } + for i := uint32(0); i < numCores; i++ { + result := <-results + if result.found { + close(quit) + header.Nonce = result.nonce + return true + } + } + + return false } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 56815dff..e916f892 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -2,6 +2,7 @@ package core_types import ( "strings" + "time" abci "github.com/tendermint/abci/types" "github.com/tendermint/go-crypto" @@ -62,11 +63,11 @@ type ResultNetInfo struct { } type ResultBlockHeaderInfo struct { - Version uint64 `json:"version"` - Height uint64 `json:"height"` + Version int32 `json:"version"` + //Height uint64 `json:"height"` MerkleRoot bc.Hash `json:"merkleroot"` PreviousBlockHash bc.Hash `json:"prevblockhash"` - TimestampMS uint64 `json:"timestamp"` + TimestampMS time.Time `json:"timestamp"` Bits uint64 `json:"bits"` Nonce uint64 `json:"nonce"` } diff --git a/rpc/wire/blockheader.go b/rpc/wire/blockheader.go new file mode 100755 index 00000000..693500b8 --- /dev/null +++ b/rpc/wire/blockheader.go @@ -0,0 +1,149 @@ +package wire + +import ( + "bytes" + "io" + "time" + + "github.com/blockchain/rpc/chainhash" +) + +// MaxBlockHeaderPayload is the maximum number of bytes a block header can be. +// Version 4 bytes + Timestamp 4 bytes + Bits 4 bytes + Nonce 4 bytes + +// PrevBlock and MerkleRoot hashes. +const MaxBlockHeaderPayload = 16 + (chainhash.HashSize * 2) + +type uint32Time time.Time + +// BlockHeader defines information about a block and is used in the bitcoin +// block (MsgBlock) and headers (MsgHeaders) messages. +type BlockHeader struct { + // Version of the block. This is not the same as the protocol version. + Version int32 + + // Hash of the previous block in the block chain. + PrevBlock chainhash.Hash + + // Merkle tree reference to hash of all transactions for the block. + MerkleRoot chainhash.Hash + + // Time the block was created. This is, unfortunately, encoded as a + // uint32 on the wire and therefore is limited to 2106. + Timestamp time.Time + + // Difficulty target for the block. + Bits uint32 + + // Nonce used to generate the block. + Nonce uint32 +} + +// blockHeaderLen is a constant that represents the number of bytes for a block +// header. +const blockHeaderLen = 80 + +// BlockHash computes the block identifier hash for the given block header. +func (h *BlockHeader) BlockHash() chainhash.Hash { + // Encode the header and double sha256 everything prior to the number of + // transactions. Ignore the error returns since there is no way the + // encode could fail except being out of memory which would cause a + // run-time panic. 
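+	// buf is pre-sized to MaxBlockHeaderPayload (80 bytes) so encoding the
+	// header below never needs to grow it.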
+ buf := bytes.NewBuffer(make([]byte, 0, MaxBlockHeaderPayload)) + _ = writeBlockHeader(buf, 0, h) + + return chainhash.DoubleHashH(buf.Bytes()) +} + +// BtcDecode decodes r using the bitcoin protocol encoding into the receiver. +// This is part of the Message interface implementation. +// See Deserialize for decoding block headers stored to disk, such as in a +// database, as opposed to decoding block headers from the wire. +func (h *BlockHeader) BtcDecode(r io.Reader, pver uint32) error { + return readBlockHeader(r, pver, h) +} + +// BtcEncode encodes the receiver to w using the bitcoin protocol encoding. +// This is part of the Message interface implementation. +// See Serialize for encoding block headers to be stored to disk, such as in a +// database, as opposed to encoding block headers for the wire. +func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32) error { + return writeBlockHeader(w, pver, h) +} + +// Deserialize decodes a block header from r into the receiver using a format +// that is suitable for long-term storage such as a database while respecting +// the Version field. +func (h *BlockHeader) Deserialize(r io.Reader) error { + // At the current time, there is no difference between the wire encoding + // at protocol version 0 and the stable long-term storage format. As + // a result, make use of readBlockHeader. + return readBlockHeader(r, 0, h) +} + +// Serialize encodes a block header from r into the receiver using a format +// that is suitable for long-term storage such as a database while respecting +// the Version field. +func (h *BlockHeader) Serialize(w io.Writer) error { + // At the current time, there is no difference between the wire encoding + // at protocol version 0 and the stable long-term storage format. As + // a result, make use of writeBlockHeader. + return writeBlockHeader(w, 0, h) +} + +// NewBlockHeader returns a new BlockHeader using the provided version, previous +// block hash, merkle root hash, difficulty bits, and nonce used to generate the +// block with defaults for the remaining fields. +func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash, + bits uint32, nonce uint32) *BlockHeader { + + // Limit the timestamp to one second precision since the protocol + // doesn't support better. + return &BlockHeader{ + Version: version, + PrevBlock: *prevHash, + MerkleRoot: *merkleRootHash, + Timestamp: time.Unix(time.Now().Unix(), 0), + Bits: bits, + Nonce: nonce, + } +} + +// readBlockHeader reads a bitcoin block header from r. See Deserialize for +// decoding block headers stored to disk, such as in a database, as opposed to +// decoding from the wire. +func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { + return readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot, + (*uint32Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce) +} +// writeBlockHeader writes a bitcoin block header to w. See Serialize for +// encoding block headers to be stored to disk, such as in a database, as +// opposed to encoding for the wire. 
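+// The timestamp is written as a uint32 of Unix seconds, matching the wire
+// encoding noted on BlockHeader.Timestamp.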
+func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error { + sec := uint32(bh.Timestamp.Unix()) + return writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot, + sec, bh.Bits, bh.Nonce) +} + +func writeElements(w io.Writer, elements ...interface{}) error { + /* + for _, element := range elements { + err := writeElement(w, element) + if err != nil { + return err + } + } +*/ + return nil +} + +func readElements(r io.Reader, elements ...interface{}) error { +/* + for _, element := range elements { + err := readElement(r, element) + if err != nil { + return err + } + } +*/ + return nil +} diff --git a/rpc/wire/difficulty.go b/rpc/wire/difficulty.go new file mode 100755 index 00000000..0610830b --- /dev/null +++ b/rpc/wire/difficulty.go @@ -0,0 +1,343 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package wire + +import ( + "math/big" + //"time" + + "github.com/blockchain/rpc/chainhash" + +) + +var ( + // bigOne is 1 represented as a big.Int. It is defined here to avoid + // the overhead of creating it multiple times. + bigOne = big.NewInt(1) + + // oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid + // the overhead of creating it multiple times. + oneLsh256 = new(big.Int).Lsh(bigOne, 256) +) + +// HashToBig converts a chainhash.Hash into a big.Int that can be used to +// perform math comparisons. +func HashToBig(hash *chainhash.Hash) *big.Int { + // A Hash is in little-endian, but the big package wants the bytes in + // big-endian, so reverse them. + buf := *hash + blen := len(buf) + for i := 0; i < blen/2; i++ { + buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i] + } + + return new(big.Int).SetBytes(buf[:]) +} +/* +// CompactToBig converts a compact representation of a whole number N to an +// unsigned 32-bit number. The representation is similar to IEEE754 floating +// point numbers. +// +// Like IEEE754 floating point, there are three basic components: the sign, +// the exponent, and the mantissa. They are broken out as follows: +// +// * the most significant 8 bits represent the unsigned base 256 exponent +// * bit 23 (the 24th bit) represents the sign bit +// * the least significant 23 bits represent the mantissa +// +// ------------------------------------------------- +// | Exponent | Sign | Mantissa | +// ------------------------------------------------- +// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] | +// ------------------------------------------------- +// +// The formula to calculate N is: +// N = (-1^sign) * mantissa * 256^(exponent-3) +// +// This compact form is only used in bitcoin to encode unsigned 256-bit numbers +// which represent difficulty targets, thus there really is not a need for a +// sign bit, but it is implemented here to stay consistent with bitcoind. +func CompactToBig(compact uint32) *big.Int { + // Extract the mantissa, sign bit, and exponent. + mantissa := compact & 0x007fffff + isNegative := compact&0x00800000 != 0 + exponent := uint(compact >> 24) + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes to represent the full 256-bit number. So, + // treat the exponent as the number of bytes and shift the mantissa + // right or left accordingly. 
This is equivalent to: + // N = mantissa * 256^(exponent-3) + var bn *big.Int + if exponent <= 3 { + mantissa >>= 8 * (3 - exponent) + bn = big.NewInt(int64(mantissa)) + } else { + bn = big.NewInt(int64(mantissa)) + bn.Lsh(bn, 8*(exponent-3)) + } + + // Make it negative if the sign bit is set. + if isNegative { + bn = bn.Neg(bn) + } + + return bn +} + +// BigToCompact converts a whole number N to a compact representation using +// an unsigned 32-bit number. The compact representation only provides 23 bits +// of precision, so values larger than (2^23 - 1) only encode the most +// significant digits of the number. See CompactToBig for details. +func BigToCompact(n *big.Int) uint32 { + // No need to do any work if it's zero. + if n.Sign() == 0 { + return 0 + } + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes. So, shift the number right or left + // accordingly. This is equivalent to: + // mantissa = mantissa / 256^(exponent-3) + var mantissa uint32 + exponent := uint(len(n.Bytes())) + if exponent <= 3 { + mantissa = uint32(n.Bits()[0]) + mantissa <<= 8 * (3 - exponent) + } else { + // Use a copy to avoid modifying the caller's original number. + tn := new(big.Int).Set(n) + mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0]) + } + + // When the mantissa already has the sign bit set, the number is too + // large to fit into the available 23-bits, so divide the number by 256 + // and increment the exponent accordingly. + if mantissa&0x00800000 != 0 { + mantissa >>= 8 + exponent++ + } + + // Pack the exponent, sign bit, and mantissa into an unsigned 32-bit + // int and return it. + compact := uint32(exponent<<24) | mantissa + if n.Sign() < 0 { + compact |= 0x00800000 + } + return compact +} + +// CalcWork calculates a work value from difficulty bits. Bitcoin increases +// the difficulty for generating a block by decreasing the value which the +// generated hash must be less than. This difficulty target is stored in each +// block header using a compact representation as described in the documentation +// for CompactToBig. The main chain is selected by choosing the chain that has +// the most proof of work (highest difficulty). Since a lower target difficulty +// value equates to higher actual difficulty, the work value which will be +// accumulated must be the inverse of the difficulty. Also, in order to avoid +// potential division by zero and really small floating point numbers, the +// result adds 1 to the denominator and multiplies the numerator by 2^256. +func CalcWork(bits uint32) *big.Int { + // Return a work value of zero if the passed difficulty bits represent + // a negative number. Note this should not happen in practice with valid + // blocks, but an invalid block could trigger it. + difficultyNum := CompactToBig(bits) + if difficultyNum.Sign() <= 0 { + return big.NewInt(0) + } + + // (1 << 256) / (difficultyNum + 1) + denominator := new(big.Int).Add(difficultyNum, bigOne) + return new(big.Int).Div(oneLsh256, denominator) +} + +// calcEasiestDifficulty calculates the easiest possible difficulty that a block +// can have given starting difficulty bits and a duration. It is mainly used to +// verify that claimed proof of work by a block is sane as compared to a +// known good checkpoint. +func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 { + // Convert types used in the calculations below. 
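+	// durationVal is the elapsed time in whole seconds; adjustmentFactor is
+	// the maximum per-retarget change allowed by the chain parameters.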
+ durationVal := int64(duration / time.Second) + adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor) + + // The test network rules allow minimum difficulty blocks after more + // than twice the desired amount of time needed to generate a block has + // elapsed. + if b.chainParams.ReduceMinDifficulty { + reductionTime := int64(b.chainParams.MinDiffReductionTime / + time.Second) + if durationVal > reductionTime { + return b.chainParams.PowLimitBits + } + } + + // Since easier difficulty equates to higher numbers, the easiest + // difficulty for a given duration is the largest value possible given + // the number of retargets for the duration and starting difficulty + // multiplied by the max adjustment factor. + newTarget := CompactToBig(bits) + for durationVal > 0 && newTarget.Cmp(b.chainParams.PowLimit) < 0 { + newTarget.Mul(newTarget, adjustmentFactor) + durationVal -= b.maxRetargetTimespan + } + + // Limit new value to the proof of work limit. + if newTarget.Cmp(b.chainParams.PowLimit) > 0 { + newTarget.Set(b.chainParams.PowLimit) + } + + return BigToCompact(newTarget) +} + +// findPrevTestNetDifficulty returns the difficulty of the previous block which +// did not have the special testnet minimum difficulty rule applied. +// +// This function MUST be called with the chain state lock held (for writes). +func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) (uint32, error) { + // Search backwards through the chain for the last block without + // the special rule applied. + iterNode := startNode + for iterNode != nil && iterNode.height%b.blocksPerRetarget != 0 && + iterNode.bits == b.chainParams.PowLimitBits { + + // Get the previous block node. This function is used over + // simply accessing iterNode.parent directly as it will + // dynamically create previous block nodes as needed. This + // helps allow only the pieces of the chain that are needed + // to remain in memory. + var err error + iterNode, err = b.index.PrevNodeFromNode(iterNode) + if err != nil { + log.Errorf("PrevNodeFromNode: %v", err) + return 0, err + } + } + + // Return the found difficulty or the minimum difficulty if no + // appropriate block was found. + lastBits := b.chainParams.PowLimitBits + if iterNode != nil { + lastBits = iterNode.bits + } + return lastBits, nil +} + +// calcNextRequiredDifficulty calculates the required difficulty for the block +// after the passed previous block node based on the difficulty retarget rules. +// This function differs from the exported CalcNextRequiredDifficulty in that +// the exported version uses the current best chain as the previous block node +// while this function accepts any block node. +// +// This function MUST be called with the chain state lock held (for writes). +func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time) (uint32, error) { + // Genesis block. + if lastNode == nil { + return b.chainParams.PowLimitBits, nil + } + + // Return the previous block's difficulty requirements if this block + // is not at a difficulty retarget interval. + if (lastNode.height+1)%b.blocksPerRetarget != 0 { + // For networks that support it, allow special reduction of the + // required difficulty once too much time has elapsed without + // mining a block. + if b.chainParams.ReduceMinDifficulty { + // Return minimum difficulty when more than the desired + // amount of time has elapsed without mining a block. 
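+			// allowMinTime is the timestamp after which the
+			// minimum-difficulty exception applies to the new block.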
+ reductionTime := int64(b.chainParams.MinDiffReductionTime / + time.Second) + allowMinTime := lastNode.timestamp + reductionTime + if newBlockTime.Unix() > allowMinTime { + return b.chainParams.PowLimitBits, nil + } + + // The block was mined within the desired timeframe, so + // return the difficulty for the last block which did + // not have the special minimum difficulty rule applied. + prevBits, err := b.findPrevTestNetDifficulty(lastNode) + if err != nil { + return 0, err + } + return prevBits, nil + } + + // For the main network (or any unrecognized networks), simply + // return the previous block's difficulty requirements. + return lastNode.bits, nil + } + + // Get the block node at the previous retarget (targetTimespan days + // worth of blocks). + firstNode := lastNode + for i := int32(0); i < b.blocksPerRetarget-1 && firstNode != nil; i++ { + // Get the previous block node. This function is used over + // simply accessing firstNode.parent directly as it will + // dynamically create previous block nodes as needed. This + // helps allow only the pieces of the chain that are needed + // to remain in memory. + var err error + firstNode, err = b.index.PrevNodeFromNode(firstNode) + if err != nil { + return 0, err + } + } + + if firstNode == nil { + return 0, AssertError("unable to obtain previous retarget block") + } + + // Limit the amount of adjustment that can occur to the previous + // difficulty. + actualTimespan := lastNode.timestamp - firstNode.timestamp + adjustedTimespan := actualTimespan + if actualTimespan < b.minRetargetTimespan { + adjustedTimespan = b.minRetargetTimespan + } else if actualTimespan > b.maxRetargetTimespan { + adjustedTimespan = b.maxRetargetTimespan + } + + // Calculate new target difficulty as: + // currentDifficulty * (adjustedTimespan / targetTimespan) + // The result uses integer division which means it will be slightly + // rounded down. Bitcoind also uses integer division to calculate this + // result. + oldTarget := CompactToBig(lastNode.bits) + newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan)) + targetTimeSpan := int64(b.chainParams.TargetTimespan / time.Second) + newTarget.Div(newTarget, big.NewInt(targetTimeSpan)) + + // Limit new value to the proof of work limit. + if newTarget.Cmp(b.chainParams.PowLimit) > 0 { + newTarget.Set(b.chainParams.PowLimit) + } + + // Log new target difficulty and return it. The new target logging is + // intentionally converting the bits back to a number instead of using + // newTarget since conversion to the compact representation loses + // precision. + newTargetBits := BigToCompact(newTarget) + log.Debugf("Difficulty retarget at block height %d", lastNode.height+1) + log.Debugf("Old target %08x (%064x)", lastNode.bits, oldTarget) + log.Debugf("New target %08x (%064x)", newTargetBits, CompactToBig(newTargetBits)) + log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v", + time.Duration(actualTimespan)*time.Second, + time.Duration(adjustedTimespan)*time.Second, + b.chainParams.TargetTimespan) + + return newTargetBits, nil +} + +// CalcNextRequiredDifficulty calculates the required difficulty for the block +// after the end of the current best chain based on the difficulty retarget +// rules. +// +// This function is safe for concurrent access. +func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) { + b.chainLock.Lock() + difficulty, err := b.calcNextRequiredDifficulty(b.bestNode, timestamp) + b.chainLock.Unlock() + return difficulty, err +} +*/
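
Note on the stubs above: writeElements and readElements in rpc/wire/blockheader.go still return nil without reading or writing anything, so BlockHeader.BlockHash double-hashes an empty buffer and solveBlock cannot yet tell one nonce from another. The sketch below is one way writeElements could be filled in, assuming a btcd-style little-endian wire encoding; the type cases cover only the values writeBlockHeader passes, and the error message and structure are illustrative assumptions, not part of this commit.

package wire

import (
	"encoding/binary"
	"fmt"
	"io"

	"github.com/blockchain/rpc/chainhash"
)

// writeElements writes each element to w in little-endian byte order. Only
// the types writeBlockHeader actually passes are handled.
func writeElements(w io.Writer, elements ...interface{}) error {
	for _, element := range elements {
		switch e := element.(type) {
		case int32, uint32:
			// Fixed-size integers are written little-endian.
			if err := binary.Write(w, binary.LittleEndian, e); err != nil {
				return err
			}
		case *chainhash.Hash:
			// Hashes are written as their raw 32 bytes.
			if _, err := w.Write(e[:]); err != nil {
				return err
			}
		default:
			return fmt.Errorf("writeElements: unsupported element type %T", element)
		}
	}
	return nil
}

readElements would mirror this with binary.Read, plus a case that converts the on-wire uint32 seconds back into BlockHeader.Timestamp through the uint32Time type.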