From 0a9f7e9411030fab69c0567e83d4c010bea9805f Mon Sep 17 00:00:00 2001 From: Paladz Date: Fri, 15 Feb 2019 18:27:35 +0800 Subject: [PATCH] delete unused (#1566) * delete unused * edit the redirect --- api/api.go | 12 +- api/metrics.go | 32 -- metrics/bench_test.go | 22 - metrics/metrics.go | 191 ------- metrics/metrics_test.go | 64 --- .../github.com/codahale/hdrhistogram/.travis.yml | 5 - vendor/github.com/codahale/hdrhistogram/LICENSE | 21 - vendor/github.com/codahale/hdrhistogram/README.md | 15 - vendor/github.com/codahale/hdrhistogram/hdr.go | 564 --------------------- .../github.com/codahale/hdrhistogram/hdr_test.go | 388 -------------- vendor/github.com/codahale/hdrhistogram/window.go | 45 -- .../codahale/hdrhistogram/window_test.go | 64 --- 12 files changed, 2 insertions(+), 1421 deletions(-) delete mode 100644 api/metrics.go delete mode 100644 metrics/bench_test.go delete mode 100644 metrics/metrics.go delete mode 100644 metrics/metrics_test.go delete mode 100644 vendor/github.com/codahale/hdrhistogram/.travis.yml delete mode 100644 vendor/github.com/codahale/hdrhistogram/LICENSE delete mode 100644 vendor/github.com/codahale/hdrhistogram/README.md delete mode 100644 vendor/github.com/codahale/hdrhistogram/hdr.go delete mode 100644 vendor/github.com/codahale/hdrhistogram/hdr_test.go delete mode 100644 vendor/github.com/codahale/hdrhistogram/window.go delete mode 100644 vendor/github.com/codahale/hdrhistogram/window_test.go diff --git a/api/api.go b/api/api.go index d23b5329..6d2423dc 100644 --- a/api/api.go +++ b/api/api.go @@ -198,7 +198,6 @@ func (a *API) buildHandler() { m := http.NewServeMux() if a.wallet != nil { walletEnable = true - m.Handle("/create-account", jsonHandler(a.createAccount)) m.Handle("/update-account-alias", jsonHandler(a.updateAccountAlias)) m.Handle("/list-accounts", jsonHandler(a.listAccounts)) @@ -303,10 +302,9 @@ func (a *API) buildHandler() { m.HandleFunc("/websocket-subscribe", a.websocketHandler) - handler := latencyHandler(m, walletEnable) + handler := walletHandler(m, walletEnable) handler = webAssetsHandler(handler) handler = gzip.Handler{Handler: handler} - a.handler = handler } @@ -367,14 +365,8 @@ func RedirectHandler(next http.Handler) http.Handler { }) } -// latencyHandler take latency for the request url path, and redirect url path to wait-disable when wallet is closed -func latencyHandler(m *http.ServeMux, walletEnable bool) http.Handler { +func walletHandler(m *http.ServeMux, walletEnable bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // latency for the request url path - if l := latency(m, req); l != nil { - defer l.RecordSince(time.Now()) - } - // when the wallet is not been opened and the url path is not been found, modify url path to error, // and redirect handler to error if _, pattern := m.Handler(req); pattern != req.URL.Path && !walletEnable { diff --git a/api/metrics.go b/api/metrics.go deleted file mode 100644 index 21423fb7..00000000 --- a/api/metrics.go +++ /dev/null @@ -1,32 +0,0 @@ -package api - -import ( - "net/http" - "sync" - "time" - - "github.com/bytom/metrics" -) - -var ( - latencyMu sync.Mutex - latencies = map[string]*metrics.RotatingLatency{} -) - -// latency returns a rotating latency histogram for the given request. -func latency(tab *http.ServeMux, req *http.Request) *metrics.RotatingLatency { - latencyMu.Lock() - defer latencyMu.Unlock() - if l := latencies[req.URL.Path]; l != nil { - return l - } - // Create a histogram only if the path is legit. 
- if _, pat := tab.Handler(req); pat == req.URL.Path { - d := 100 * time.Millisecond - l := metrics.NewRotatingLatency(5, d) - latencies[req.URL.Path] = l - metrics.PublishLatency(req.URL.Path, l) - return l - } - return nil -} diff --git a/metrics/bench_test.go b/metrics/bench_test.go deleted file mode 100644 index 75a03f37..00000000 --- a/metrics/bench_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package metrics - -import ( - "fmt" - "io/ioutil" - "testing" - "time" -) - -func BenchmarkRecord(b *testing.B) { - rot := NewRotatingLatency(5, time.Second) - for i := 0; i < b.N; i++ { - rot.Record(0) - } -} - -func BenchmarkStringFormat(b *testing.B) { - rot := NewRotatingLatency(5, time.Second) - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%s", rot) - } -} diff --git a/metrics/metrics.go b/metrics/metrics.go deleted file mode 100644 index 89f2f00f..00000000 --- a/metrics/metrics.go +++ /dev/null @@ -1,191 +0,0 @@ -// Package metrics provides convenient facilities to record -// on-line high-level performance metrics. -package metrics - -import ( - "bytes" - "encoding/json" - "expvar" - "fmt" - "sync" - "time" - - "github.com/codahale/hdrhistogram" -) - -// Period is the size of a RotatingLatency bucket. -// Each RotatingLatency will rotate once per Period. -const Period = time.Minute - -var ( - rotatingLatenciesMu sync.Mutex - rotatingLatencies []*RotatingLatency - latencyExpvar = expvar.NewMap("latency") -) - -// PublishLatency publishes rl as an expvar inside the -// global latency map (which is itself published under -// the key "latency"). -func PublishLatency(key string, rl *RotatingLatency) { - latencyExpvar.Set(key, rl) -} - -// A Latency records information about the aggregate latency -// of an operation over time. -// Internally it holds an HDR histogram (to three significant figures) -// and a counter of attempts to record a value -// greater than the histogram's max. -type Latency struct { - limit time.Duration // readonly - - time time.Time - hdr hdrhistogram.Histogram - nover int // how many values were over limit - max time.Duration // max recorded value (can be over limit) -} - -// NewLatency returns a new latency histogram with the given -// duration limit and with three significant figures of precision. -func NewLatency(limit time.Duration) *Latency { - return &Latency{ - hdr: *hdrhistogram.New(0, int64(limit), 2), - limit: limit, - } -} - -// Record attempts to record a duration in the histogram. -// If d is greater than the max allowed duration, -// it increments a counter instead. -func (l *Latency) Record(d time.Duration) { - if d > l.max { - l.max = d - } - if d > l.limit { - l.nover++ - } else { - l.hdr.RecordValue(int64(d)) - } -} - -// Reset resets l to is original empty state. -func (l *Latency) Reset() { - l.hdr.Reset() - l.nover = 0 -} - -// String returns l as a JSON string. -// This makes it suitable for use as an expvar.Val. -func (l *Latency) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, `{"Histogram":`) - h, _ := json.Marshal((&l.hdr).Export()) // #nosec - b.Write(h) - fmt.Fprintf(&b, `,"Over":%d,"Timestamp":%d,"Max":%d}`, l.nover, l.time.Unix(), l.max) - return b.String() -} - -// A RotatingLatency holds a rotating circular buffer of Latency objects, -// that rotates once per Period time. -// It can be used as an expvar Val. -// Its exported methods are safe to call concurrently. 
-type RotatingLatency struct { - mu sync.Mutex - l []Latency - n int - cur *Latency -} - -// NewRotatingLatency returns a new rotating latency recorder -// with n buckets of history. -func NewRotatingLatency(n int, max time.Duration) *RotatingLatency { - r := &RotatingLatency{ - l: make([]Latency, n), - } - for i := range r.l { - r.l[i] = *NewLatency(max) - } - r.rotate() - rotatingLatenciesMu.Lock() - rotatingLatencies = append(rotatingLatencies, r) - rotatingLatenciesMu.Unlock() - return r -} - -// Record attempts to record a duration in the current Latency in r. -// If d is greater than the max allowed duration, -// it increments a counter instead. -func (r *RotatingLatency) Record(d time.Duration) { - r.mu.Lock() - r.cur.Record(d) - r.mu.Unlock() -} - -func (r *RotatingLatency) RecordSince(t0 time.Time) { - r.Record(time.Since(t0)) -} - -func (r *RotatingLatency) rotate() { - r.mu.Lock() - defer r.mu.Unlock() - if r.cur != nil { - r.cur.time = time.Now() - } - r.n++ - r.cur = &r.l[r.n%len(r.l)] - r.cur.Reset() -} - -// String returns r as a JSON string. -// This makes it suitable for use as an expvar.Val. -// -// Example: -// -// { -// "NumRot": 204, -// "Buckets": [ -// { -// "Over": 4, -// "Histogram": { -// "LowestTrackableValue": 0, -// "HighestTrackableValue": 1000000000, -// "SignificantFigures": 2, -// "Counts": [2,0,15,...] -// } -// }, -// ... -// ] -// } -// -// Note that the last bucket is actively recording values. -// To collect complete and accurate data over a long time, -// store the next-to-last bucket after each rotation. -// The last bucket is only useful for a "live" view -// with finer granularity than the rotation period (which is one minute). -func (r *RotatingLatency) String() string { - r.mu.Lock() - defer r.mu.Unlock() - var b bytes.Buffer - fmt.Fprintf(&b, `{"Buckets":[`) - for i := range r.l { - if i > 0 { - b.WriteByte(',') - } - j := (r.n + i + 1) % len(r.l) - fmt.Fprintf(&b, "%s", &r.l[j]) - } - fmt.Fprintf(&b, `],"NumRot":%d}`, r.n) - return b.String() -} - -func init() { - go func() { - for range time.Tick(Period) { - rotatingLatenciesMu.Lock() - a := rotatingLatencies - rotatingLatenciesMu.Unlock() - for _, rot := range a { - rot.rotate() - } - } - }() -} diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go deleted file mode 100644 index ca294d5f..00000000 --- a/metrics/metrics_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package metrics - -import ( - "encoding/json" - "reflect" - "testing" - "time" - - "github.com/codahale/hdrhistogram" -) - -func TestRotString(t *testing.T) { - rot := NewRotatingLatency(2, time.Second) - rot.l[0].hdr = *hdrhistogram.New(0, int64(time.Second), 1) - rot.l[1].hdr = *hdrhistogram.New(1, int64(time.Second), 1) // "current" - - want := `{ - "NumRot": 1, - "Buckets": [ - { - "Max": 0, - "Over": 0, - "Timestamp": -62135596800, - "Histogram": { - "LowestTrackableValue": 0, - "HighestTrackableValue": 1000000000, - "SignificantFigures": 1, - "Counts": 
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] - } - }, - { - "Max": 0, - "Over": 0, - "Timestamp": -62135596800, - "Histogram": { - "LowestTrackableValue": 1, - "HighestTrackableValue": 1000000000, - "SignificantFigures": 1, - "Counts": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] - } - } - ] - }` - - got := rot.String() - if !jsonIsEqual(t, got, want) { - t.Errorf("%#v.String() = %#q want %#q", rot, got, want) - } -} - -func jsonIsEqual(t *testing.T, a, b string) bool { - var av, bv interface{} - - err := json.Unmarshal([]byte(a), &av) - if err != nil { - t.Fatal(err, a) - } - err = json.Unmarshal([]byte(b), &bv) - if err != nil { - t.Fatal(err, b) - } - - return reflect.DeepEqual(av, bv) -} diff --git a/vendor/github.com/codahale/hdrhistogram/.travis.yml b/vendor/github.com/codahale/hdrhistogram/.travis.yml deleted file mode 100644 index 7960fc95..00000000 --- a/vendor/github.com/codahale/hdrhistogram/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.5 - - 1.6 - - tip diff --git a/vendor/github.com/codahale/hdrhistogram/LICENSE b/vendor/github.com/codahale/hdrhistogram/LICENSE deleted file mode 100644 index f9835c24..00000000 --- a/vendor/github.com/codahale/hdrhistogram/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Coda Hale - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/codahale/hdrhistogram/README.md b/vendor/github.com/codahale/hdrhistogram/README.md deleted file mode 100644 index 614b197c..00000000 --- a/vendor/github.com/codahale/hdrhistogram/README.md +++ /dev/null @@ -1,15 +0,0 @@ -hdrhistogram -============ - -[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram) - -A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram). - -> A Histogram that supports recording and analyzing sampled data value counts -> across a configurable integer value range with configurable value precision -> within the range. Value precision is expressed as the number of significant -> digits in the value recording, and provides control over value quantization -> behavior across the value range and the subsequent value resolution at any -> given level. - -For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram). diff --git a/vendor/github.com/codahale/hdrhistogram/hdr.go b/vendor/github.com/codahale/hdrhistogram/hdr.go deleted file mode 100644 index c9784292..00000000 --- a/vendor/github.com/codahale/hdrhistogram/hdr.go +++ /dev/null @@ -1,564 +0,0 @@ -// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram -// data structure. The HDR Histogram allows for fast and accurate analysis of -// the extreme ranges of data with non-normal distributions, like latency. -package hdrhistogram - -import ( - "fmt" - "math" -) - -// A Bracket is a part of a cumulative distribution. -type Bracket struct { - Quantile float64 - Count, ValueAt int64 -} - -// A Snapshot is an exported view of a Histogram, useful for serializing them. -// A Histogram can be constructed from it by passing it to Import. -type Snapshot struct { - LowestTrackableValue int64 - HighestTrackableValue int64 - SignificantFigures int64 - Counts []int64 -} - -// A Histogram is a lossy data structure used to record the distribution of -// non-normally distributed data (like latency) with a high degree of accuracy -// and a bounded degree of precision. -type Histogram struct { - lowestTrackableValue int64 - highestTrackableValue int64 - unitMagnitude int64 - significantFigures int64 - subBucketHalfCountMagnitude int32 - subBucketHalfCount int32 - subBucketMask int64 - subBucketCount int32 - bucketCount int32 - countsLen int32 - totalCount int64 - counts []int64 -} - -// New returns a new Histogram instance capable of tracking values in the given -// range and with the given amount of precision. 
-func New(minValue, maxValue int64, sigfigs int) *Histogram { - if sigfigs < 1 || 5 < sigfigs { - panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) - } - - largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) - subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) - - subBucketHalfCountMagnitude := subBucketCountMagnitude - if subBucketHalfCountMagnitude < 1 { - subBucketHalfCountMagnitude = 1 - } - subBucketHalfCountMagnitude-- - - unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) - if unitMagnitude < 0 { - unitMagnitude = 0 - } - - subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1)) - - subBucketHalfCount := subBucketCount / 2 - subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude) - - // determine exponent range needed to support the trackable value with no - // overflow: - smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude) - bucketsNeeded := int32(1) - for smallestUntrackableValue < maxValue { - smallestUntrackableValue <<= 1 - bucketsNeeded++ - } - - bucketCount := bucketsNeeded - countsLen := (bucketCount + 1) * (subBucketCount / 2) - - return &Histogram{ - lowestTrackableValue: minValue, - highestTrackableValue: maxValue, - unitMagnitude: int64(unitMagnitude), - significantFigures: int64(sigfigs), - subBucketHalfCountMagnitude: subBucketHalfCountMagnitude, - subBucketHalfCount: subBucketHalfCount, - subBucketMask: subBucketMask, - subBucketCount: subBucketCount, - bucketCount: bucketCount, - countsLen: countsLen, - totalCount: 0, - counts: make([]int64, countsLen), - } -} - -// ByteSize returns an estimate of the amount of memory allocated to the -// histogram in bytes. -// -// N.B.: This does not take into account the overhead for slices, which are -// small, constant, and specific to the compiler version. -func (h *Histogram) ByteSize() int { - return 6*8 + 5*4 + len(h.counts)*8 -} - -// Merge merges the data stored in the given histogram with the receiver, -// returning the number of recorded values which had to be dropped. -func (h *Histogram) Merge(from *Histogram) (dropped int64) { - i := from.rIterator() - for i.next() { - v := i.valueFromIdx - c := i.countAtIdx - - if h.RecordValues(v, c) != nil { - dropped += c - } - } - - return -} - -// TotalCount returns total number of values recorded. -func (h *Histogram) TotalCount() int64 { - return h.totalCount -} - -// Max returns the approximate maximum recorded value. -func (h *Histogram) Max() int64 { - var max int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - max = i.highestEquivalentValue - } - } - return h.highestEquivalentValue(max) -} - -// Min returns the approximate minimum recorded value. -func (h *Histogram) Min() int64 { - var min int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 && min == 0 { - min = i.highestEquivalentValue - break - } - } - return h.lowestEquivalentValue(min) -} - -// Mean returns the approximate arithmetic mean of the recorded values. -func (h *Histogram) Mean() float64 { - if h.totalCount == 0 { - return 0 - } - var total int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx) - } - } - return float64(total) / float64(h.totalCount) -} - -// StdDev returns the approximate standard deviation of the recorded values. 
-func (h *Histogram) StdDev() float64 { - if h.totalCount == 0 { - return 0 - } - - mean := h.Mean() - geometricDevTotal := 0.0 - - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean - geometricDevTotal += (dev * dev) * float64(i.countAtIdx) - } - } - - return math.Sqrt(geometricDevTotal / float64(h.totalCount)) -} - -// Reset deletes all recorded values and restores the histogram to its original -// state. -func (h *Histogram) Reset() { - h.totalCount = 0 - for i := range h.counts { - h.counts[i] = 0 - } -} - -// RecordValue records the given value, returning an error if the value is out -// of range. -func (h *Histogram) RecordValue(v int64) error { - return h.RecordValues(v, 1) -} - -// RecordCorrectedValue records the given value, correcting for stalls in the -// recording process. This only works for processes which are recording values -// at an expected interval (e.g., doing jitter analysis). Processes which are -// recording ad-hoc values (e.g., latency for incoming requests) can't take -// advantage of this. -func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { - if err := h.RecordValue(v); err != nil { - return err - } - - if expectedInterval <= 0 || v <= expectedInterval { - return nil - } - - missingValue := v - expectedInterval - for missingValue >= expectedInterval { - if err := h.RecordValue(missingValue); err != nil { - return err - } - missingValue -= expectedInterval - } - - return nil -} - -// RecordValues records n occurrences of the given value, returning an error if -// the value is out of range. -func (h *Histogram) RecordValues(v, n int64) error { - idx := h.countsIndexFor(v) - if idx < 0 || int(h.countsLen) <= idx { - return fmt.Errorf("value %d is too large to be recorded", v) - } - h.counts[idx] += n - h.totalCount += n - - return nil -} - -// ValueAtQuantile returns the recorded value at the given quantile (0..100). -func (h *Histogram) ValueAtQuantile(q float64) int64 { - if q > 100 { - q = 100 - } - - total := int64(0) - countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5) - - i := h.iterator() - for i.next() { - total += i.countAtIdx - if total >= countAtPercentile { - return h.highestEquivalentValue(i.valueFromIdx) - } - } - - return 0 -} - -// CumulativeDistribution returns an ordered list of brackets of the -// distribution of recorded values. 
-func (h *Histogram) CumulativeDistribution() []Bracket { - var result []Bracket - - i := h.pIterator(1) - for i.next() { - result = append(result, Bracket{ - Quantile: i.percentile, - Count: i.countToIdx, - ValueAt: i.highestEquivalentValue, - }) - } - - return result -} - -// SignificantFigures returns the significant figures used to create the -// histogram -func (h *Histogram) SignificantFigures() int64 { - return h.significantFigures -} - -// LowestTrackableValue returns the lower bound on values that will be added -// to the histogram -func (h *Histogram) LowestTrackableValue() int64 { - return h.lowestTrackableValue -} - -// HighestTrackableValue returns the upper bound on values that will be added -// to the histogram -func (h *Histogram) HighestTrackableValue() int64 { - return h.highestTrackableValue -} - -// Histogram bar for plotting -type Bar struct { - From, To, Count int64 -} - -// Pretty print as csv for easy plotting -func (b Bar) String() string { - return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count) -} - -// Distribution returns an ordered list of bars of the -// distribution of recorded values, counts can be normalized to a probability -func (h *Histogram) Distribution() (result []Bar) { - i := h.iterator() - for i.next() { - result = append(result, Bar{ - Count: i.countAtIdx, - From: h.lowestEquivalentValue(i.valueFromIdx), - To: i.highestEquivalentValue, - }) - } - - return result -} - -// Equals returns true if the two Histograms are equivalent, false if not. -func (h *Histogram) Equals(other *Histogram) bool { - switch { - case - h.lowestTrackableValue != other.lowestTrackableValue, - h.highestTrackableValue != other.highestTrackableValue, - h.unitMagnitude != other.unitMagnitude, - h.significantFigures != other.significantFigures, - h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude, - h.subBucketHalfCount != other.subBucketHalfCount, - h.subBucketMask != other.subBucketMask, - h.subBucketCount != other.subBucketCount, - h.bucketCount != other.bucketCount, - h.countsLen != other.countsLen, - h.totalCount != other.totalCount: - return false - default: - for i, c := range h.counts { - if c != other.counts[i] { - return false - } - } - } - return true -} - -// Export returns a snapshot view of the Histogram. This can be later passed to -// Import to construct a new Histogram with the same state. -func (h *Histogram) Export() *Snapshot { - return &Snapshot{ - LowestTrackableValue: h.lowestTrackableValue, - HighestTrackableValue: h.highestTrackableValue, - SignificantFigures: h.significantFigures, - Counts: append([]int64(nil), h.counts...), // copy - } -} - -// Import returns a new Histogram populated from the Snapshot data (which the -// caller must stop accessing). 
-func Import(s *Snapshot) *Histogram { - h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) - h.counts = s.Counts - totalCount := int64(0) - for i := int32(0); i < h.countsLen; i++ { - countAtIndex := h.counts[i] - if countAtIndex > 0 { - totalCount += countAtIndex - } - } - h.totalCount = totalCount - return h -} - -func (h *Histogram) iterator() *iterator { - return &iterator{ - h: h, - subBucketIdx: -1, - } -} - -func (h *Histogram) rIterator() *rIterator { - return &rIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - } -} - -func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator { - return &pIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - ticksPerHalfDistance: ticksPerHalfDistance, - } -} - -func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - adjustedBucket := bucketIdx - if subBucketIdx >= h.subBucketCount { - adjustedBucket++ - } - return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket)) -} - -func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { - return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude) -} - -func (h *Histogram) lowestEquivalentValue(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return h.valueFromIndex(bucketIdx, subBucketIdx) -} - -func (h *Histogram) nextNonEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v) -} - -func (h *Histogram) highestEquivalentValue(v int64) int64 { - return h.nextNonEquivalentValue(v) - 1 -} - -func (h *Histogram) medianEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1) -} - -func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 { - return h.counts[h.countsIndex(bucketIdx, subBucketIdx)] -} - -func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 { - bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude) - offsetInBucket := subBucketIdx - h.subBucketHalfCount - return bucketBaseIdx + offsetInBucket -} - -func (h *Histogram) getBucketIndex(v int64) int32 { - pow2Ceiling := bitLen(v | h.subBucketMask) - return int32(pow2Ceiling - int64(h.unitMagnitude) - - int64(h.subBucketHalfCountMagnitude+1)) -} - -func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 { - return int32(v >> uint(int64(idx)+int64(h.unitMagnitude))) -} - -func (h *Histogram) countsIndexFor(v int64) int { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return int(h.countsIndex(bucketIdx, subBucketIdx)) -} - -type iterator struct { - h *Histogram - bucketIdx, subBucketIdx int32 - countAtIdx, countToIdx, valueFromIdx int64 - highestEquivalentValue int64 -} - -func (i *iterator) next() bool { - if i.countToIdx >= i.h.totalCount { - return false - } - - // increment bucket - i.subBucketIdx++ - if i.subBucketIdx >= i.h.subBucketCount { - i.subBucketIdx = i.h.subBucketHalfCount - i.bucketIdx++ - } - - if i.bucketIdx >= i.h.bucketCount { - return false - } - - i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx) - i.countToIdx += i.countAtIdx - i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) - i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) - - return true -} - -type rIterator struct { - iterator - countAddedThisStep int64 -} - -func (r *rIterator) next() 
bool { - for r.iterator.next() { - if r.countAtIdx != 0 { - r.countAddedThisStep = r.countAtIdx - return true - } - } - return false -} - -type pIterator struct { - iterator - seenLastValue bool - ticksPerHalfDistance int32 - percentileToIteratorTo float64 - percentile float64 -} - -func (p *pIterator) next() bool { - if !(p.countToIdx < p.h.totalCount) { - if p.seenLastValue { - return false - } - - p.seenLastValue = true - p.percentile = 100 - - return true - } - - if p.subBucketIdx == -1 && !p.iterator.next() { - return false - } - - var done = false - for !done { - currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) - if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { - p.percentile = p.percentileToIteratorTo - halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) - percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance - p.percentileToIteratorTo += 100.0 / percentileReportingTicks - return true - } - done = !p.iterator.next() - } - - return true -} - -func bitLen(x int64) (n int64) { - for ; x >= 0x8000; x >>= 16 { - n += 16 - } - if x >= 0x80 { - x >>= 8 - n += 8 - } - if x >= 0x8 { - x >>= 4 - n += 4 - } - if x >= 0x2 { - x >>= 2 - n += 2 - } - if x >= 0x1 { - n++ - } - return -} diff --git a/vendor/github.com/codahale/hdrhistogram/hdr_test.go b/vendor/github.com/codahale/hdrhistogram/hdr_test.go deleted file mode 100644 index 309f0ea8..00000000 --- a/vendor/github.com/codahale/hdrhistogram/hdr_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package hdrhistogram_test - -import ( - "math" - "reflect" - "testing" - - "github.com/codahale/hdrhistogram" -) - -func TestHighSigFig(t *testing.T) { - input := []int64{ - 459876, 669187, 711612, 816326, 931423, 1033197, 1131895, 2477317, - 3964974, 12718782, - } - - hist := hdrhistogram.New(459876, 12718782, 5) - for _, sample := range input { - hist.RecordValue(sample) - } - - if v, want := hist.ValueAtQuantile(50), int64(1048575); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func TestValueAtQuantile(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - data := []struct { - q float64 - v int64 - }{ - {q: 50, v: 500223}, - {q: 75, v: 750079}, - {q: 90, v: 900095}, - {q: 95, v: 950271}, - {q: 99, v: 990207}, - {q: 99.9, v: 999423}, - {q: 99.99, v: 999935}, - } - - for _, d := range data { - if v := h.ValueAtQuantile(d.q); v != d.v { - t.Errorf("P%v was %v, but expected %v", d.q, v, d.v) - } - } -} - -func TestMean(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Mean(), 500000.013312; v != want { - t.Errorf("Mean was %v, but expected %v", v, want) - } -} - -func TestStdDev(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.StdDev(), 288675.1403682715; v != want { - t.Errorf("StdDev was %v, but expected %v", v, want) - } -} - -func TestTotalCount(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - if v, want := h.TotalCount(), int64(i+1); v != want { - t.Errorf("TotalCount was %v, but expected %v", v, want) - } - } -} - 
-func TestMax(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Max(), int64(1000447); v != want { - t.Errorf("Max was %v, but expected %v", v, want) - } -} - -func TestReset(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h.Reset() - - if v, want := h.Max(), int64(0); v != want { - t.Errorf("Max was %v, but expected %v", v, want) - } -} - -func TestMerge(t *testing.T) { - h1 := hdrhistogram.New(1, 1000, 3) - h2 := hdrhistogram.New(1, 1000, 3) - - for i := 0; i < 100; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - for i := 100; i < 200; i++ { - if err := h2.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h1.Merge(h2) - - if v, want := h1.ValueAtQuantile(50), int64(99); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func TestMin(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Min(), int64(0); v != want { - t.Errorf("Min was %v, but expected %v", v, want) - } -} - -func TestByteSize(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if v, want := h.ByteSize(), 65604; v != want { - t.Errorf("ByteSize was %v, but expected %d", v, want) - } -} - -func TestRecordCorrectedValue(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if err := h.RecordCorrectedValue(10, 100); err != nil { - t.Fatal(err) - } - - if v, want := h.ValueAtQuantile(75), int64(10); v != want { - t.Errorf("Corrected value was %v, but expected %v", v, want) - } -} - -func TestRecordCorrectedValueStall(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if err := h.RecordCorrectedValue(1000, 100); err != nil { - t.Fatal(err) - } - - if v, want := h.ValueAtQuantile(75), int64(800); v != want { - t.Errorf("Corrected value was %v, but expected %v", v, want) - } -} - -func TestCumulativeDistribution(t *testing.T) { - h := hdrhistogram.New(1, 100000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - actual := h.CumulativeDistribution() - expected := []hdrhistogram.Bracket{ - hdrhistogram.Bracket{Quantile: 0, Count: 1, ValueAt: 0}, - hdrhistogram.Bracket{Quantile: 50, Count: 500224, ValueAt: 500223}, - hdrhistogram.Bracket{Quantile: 75, Count: 750080, ValueAt: 750079}, - hdrhistogram.Bracket{Quantile: 87.5, Count: 875008, ValueAt: 875007}, - hdrhistogram.Bracket{Quantile: 93.75, Count: 937984, ValueAt: 937983}, - hdrhistogram.Bracket{Quantile: 96.875, Count: 969216, ValueAt: 969215}, - hdrhistogram.Bracket{Quantile: 98.4375, Count: 984576, ValueAt: 984575}, - hdrhistogram.Bracket{Quantile: 99.21875, Count: 992256, ValueAt: 992255}, - hdrhistogram.Bracket{Quantile: 99.609375, Count: 996352, ValueAt: 996351}, - hdrhistogram.Bracket{Quantile: 99.8046875, Count: 998400, ValueAt: 998399}, - hdrhistogram.Bracket{Quantile: 99.90234375, Count: 999424, ValueAt: 999423}, - hdrhistogram.Bracket{Quantile: 99.951171875, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.9755859375, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.98779296875, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.993896484375, Count: 1000000, ValueAt: 1000447}, - 
hdrhistogram.Bracket{Quantile: 100, Count: 1000000, ValueAt: 1000447}, - } - - if !reflect.DeepEqual(actual, expected) { - t.Errorf("CF was %#v, but expected %#v", actual, expected) - } -} - -func TestDistribution(t *testing.T) { - h := hdrhistogram.New(8, 1024, 3) - - for i := 0; i < 1024; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - actual := h.Distribution() - if len(actual) != 128 { - t.Errorf("Number of bars seen was %v, expected was 128", len(actual)) - } - for _, b := range actual { - if b.Count != 8 { - t.Errorf("Count per bar seen was %v, expected was 8", b.Count) - } - } -} - -func TestNaN(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - if math.IsNaN(h.Mean()) { - t.Error("mean is NaN") - } - if math.IsNaN(h.StdDev()) { - t.Error("stddev is NaN") - } -} - -func TestSignificantFigures(t *testing.T) { - const sigFigs = 4 - h := hdrhistogram.New(1, 10, sigFigs) - if h.SignificantFigures() != sigFigs { - t.Errorf("Significant figures was %v, expected %d", h.SignificantFigures(), sigFigs) - } -} - -func TestLowestTrackableValue(t *testing.T) { - const minVal = 2 - h := hdrhistogram.New(minVal, 10, 3) - if h.LowestTrackableValue() != minVal { - t.Errorf("LowestTrackableValue figures was %v, expected %d", h.LowestTrackableValue(), minVal) - } -} - -func TestHighestTrackableValue(t *testing.T) { - const maxVal = 11 - h := hdrhistogram.New(1, maxVal, 3) - if h.HighestTrackableValue() != maxVal { - t.Errorf("HighestTrackableValue figures was %v, expected %d", h.HighestTrackableValue(), maxVal) - } -} - -func BenchmarkHistogramRecordValue(b *testing.B) { - h := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - h.RecordValue(100) - } -} - -func BenchmarkNew(b *testing.B) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min - } -} - -func TestUnitMagnitudeOverflow(t *testing.T) { - h := hdrhistogram.New(0, 200, 4) - if err := h.RecordValue(11); err != nil { - t.Fatal(err) - } -} - -func TestSubBucketMaskOverflow(t *testing.T) { - hist := hdrhistogram.New(2e7, 1e8, 5) - for _, sample := range [...]int64{1e8, 2e7, 3e7} { - hist.RecordValue(sample) - } - - for q, want := range map[float64]int64{ - 50: 33554431, - 83.33: 33554431, - 83.34: 100663295, - 99: 100663295, - } { - if got := hist.ValueAtQuantile(q); got != want { - t.Errorf("got %d for %fth percentile. 
want: %d", got, q, want) - } - } -} - -func TestExportImport(t *testing.T) { - min := int64(1) - max := int64(10000000) - sigfigs := 3 - h := hdrhistogram.New(min, max, sigfigs) - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - s := h.Export() - - if v := s.LowestTrackableValue; v != min { - t.Errorf("LowestTrackableValue was %v, but expected %v", v, min) - } - - if v := s.HighestTrackableValue; v != max { - t.Errorf("HighestTrackableValue was %v, but expected %v", v, max) - } - - if v := int(s.SignificantFigures); v != sigfigs { - t.Errorf("SignificantFigures was %v, but expected %v", v, sigfigs) - } - - if imported := hdrhistogram.Import(s); !imported.Equals(h) { - t.Error("Expected Histograms to be equivalent") - } - -} - -func TestEquals(t *testing.T) { - h1 := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 1000000; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h2 := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 10000; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if h1.Equals(h2) { - t.Error("Expected Histograms to not be equivalent") - } - - h1.Reset() - h2.Reset() - - if !h1.Equals(h2) { - t.Error("Expected Histograms to be equivalent") - } -} diff --git a/vendor/github.com/codahale/hdrhistogram/window.go b/vendor/github.com/codahale/hdrhistogram/window.go deleted file mode 100644 index dc43612a..00000000 --- a/vendor/github.com/codahale/hdrhistogram/window.go +++ /dev/null @@ -1,45 +0,0 @@ -package hdrhistogram - -// A WindowedHistogram combines histograms to provide windowed statistics. -type WindowedHistogram struct { - idx int - h []Histogram - m *Histogram - - Current *Histogram -} - -// NewWindowed creates a new WindowedHistogram with N underlying histograms with -// the given parameters. -func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram { - w := WindowedHistogram{ - idx: -1, - h: make([]Histogram, n), - m: New(minValue, maxValue, sigfigs), - } - - for i := range w.h { - w.h[i] = *New(minValue, maxValue, sigfigs) - } - w.Rotate() - - return &w -} - -// Merge returns a histogram which includes the recorded values from all the -// sections of the window. -func (w *WindowedHistogram) Merge() *Histogram { - w.m.Reset() - for _, h := range w.h { - w.m.Merge(&h) - } - return w.m -} - -// Rotate resets the oldest histogram and rotates it to be used as the current -// histogram. 
-func (w *WindowedHistogram) Rotate() { - w.idx++ - w.Current = &w.h[w.idx%len(w.h)] - w.Current.Reset() -} diff --git a/vendor/github.com/codahale/hdrhistogram/window_test.go b/vendor/github.com/codahale/hdrhistogram/window_test.go deleted file mode 100644 index 7e787588..00000000 --- a/vendor/github.com/codahale/hdrhistogram/window_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package hdrhistogram_test - -import ( - "testing" - - "github.com/codahale/hdrhistogram" -) - -func TestWindowedHistogram(t *testing.T) { - w := hdrhistogram.NewWindowed(2, 1, 1000, 3) - - for i := 0; i < 100; i++ { - w.Current.RecordValue(int64(i)) - } - w.Rotate() - - for i := 100; i < 200; i++ { - w.Current.RecordValue(int64(i)) - } - w.Rotate() - - for i := 200; i < 300; i++ { - w.Current.RecordValue(int64(i)) - } - - if v, want := w.Merge().ValueAtQuantile(50), int64(199); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func BenchmarkWindowedHistogramRecordAndRotate(b *testing.B) { - w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if err := w.Current.RecordValue(100); err != nil { - b.Fatal(err) - } - - if i%100000 == 1 { - w.Rotate() - } - } -} - -func BenchmarkWindowedHistogramMerge(b *testing.B) { - w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) - for i := 0; i < 10000000; i++ { - if err := w.Current.RecordValue(100); err != nil { - b.Fatal(err) - } - - if i%100000 == 1 { - w.Rotate() - } - } - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - w.Merge() - } -} -- 2.11.0