OSDN Git Service

Added metrics
author gguoss <1536310027@qq.com>
Fri, 25 Aug 2017 05:11:52 +0000 (13:11 +0800)
committer gguoss <1536310027@qq.com>
Fri, 25 Aug 2017 05:11:52 +0000 (13:11 +0800)
blockchain/metrics.go [new file with mode: 0644]
metrics/bench_test.go [new file with mode: 0644]
metrics/metrics.go [new file with mode: 0644]
metrics/metrics_test.go [new file with mode: 0644]

diff --git a/blockchain/metrics.go b/blockchain/metrics.go
new file mode 100644 (file)
index 0000000..3ae2a19
--- /dev/null
@@ -0,0 +1,70 @@
+package core
+
+import (
+       "expvar"
+       "net/http"
+       "sync"
+       "time"
+
+       "chain/metrics"
+       "chain/net/http/reqid"
+)
+
+var (
+       // latencyMu guards latencies.
+       latencyMu sync.Mutex
+       latencies = map[string]*metrics.RotatingLatency{}
+
+       // latencyRange gives per-path histogram limits for RPCs that are
+       // expected to be slower than the 100ms default used in latency().
+       latencyRange = map[string]time.Duration{
+               crosscoreRPCPrefix + "get-block":         20 * time.Second,
+               crosscoreRPCPrefix + "signer/sign-block": 5 * time.Second,
+               crosscoreRPCPrefix + "get-snapshot":      30 * time.Second,
+               // the rest have a default range
+       }
+)
+
+// latency returns a rotating latency histogram for the given request.
+// Histograms are created lazily, one per registered mux pattern, and
+// published via metrics.PublishLatency. Requests whose path is not an
+// exact pattern registered in tab get nil (no histogram is recorded).
+func latency(tab *http.ServeMux, req *http.Request) *metrics.RotatingLatency {
+       latencyMu.Lock()
+       defer latencyMu.Unlock()
+       if l := latencies[req.URL.Path]; l != nil {
+               return l
+       }
+       // Create a histogram only if the path is legit.
+       if _, pat := tab.Handler(req); pat == req.URL.Path {
+               d, ok := latencyRange[req.URL.Path]
+               if !ok {
+                       d = 100 * time.Millisecond
+               }
+               l := metrics.NewRotatingLatency(5, d)
+               latencies[req.URL.Path] = l
+               metrics.PublishLatency(req.URL.Path, l)
+               return l
+       }
+       return nil
+}
+
+var (
+       ncoreMu   sync.Mutex // guards ncoreTime and coresSeen
+       ncore     = expvar.NewInt("ncore")
+       ncoreTime time.Time       // start of the current counting window
+       coresSeen map[string]bool // core IDs seen this window; allocated lazily in countCore
+)
+
+// coreCounter wraps h in middleware that counts the distinct core IDs
+// (taken from each request's context) before delegating to h.
+func coreCounter(h http.Handler) http.Handler {
+       return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+               countCore(reqid.CoreIDFromContext(req.Context()))
+               h.ServeHTTP(w, req)
+       })
+}
+
+// countCore records one sighting of core id and, at most once per
+// minute, publishes the number of distinct cores seen in the previous
+// window to the "ncore" expvar before starting a fresh window.
+func countCore(id string) {
+       t := time.Now()
+       ncoreMu.Lock()
+       defer ncoreMu.Unlock()
+       if t.Sub(ncoreTime) > time.Minute {
+               ncore.Set(int64(len(coresSeen)))
+               ncoreTime = t
+               coresSeen = make(map[string]bool)
+       }
+       // On the very first call ncoreTime is the zero Time, so the branch
+       // above always runs and allocates coresSeen before this write;
+       // the nil map declared at package level is never written to.
+       coresSeen[id] = true
+}
diff --git a/metrics/bench_test.go b/metrics/bench_test.go
new file mode 100644 (file)
index 0000000..75a03f3
--- /dev/null
@@ -0,0 +1,22 @@
+package metrics
+
+import (
+       "fmt"
+       "io/ioutil"
+       "testing"
+       "time"
+)
+
+// BenchmarkRecord measures the cost of recording a single duration
+// (lock + histogram insert) into a RotatingLatency.
+func BenchmarkRecord(b *testing.B) {
+       rot := NewRotatingLatency(5, time.Second)
+       for i := 0; i < b.N; i++ {
+               rot.Record(0)
+       }
+}
+
+// BenchmarkStringFormat measures the cost of rendering a
+// RotatingLatency to its JSON expvar representation.
+func BenchmarkStringFormat(b *testing.B) {
+       rot := NewRotatingLatency(5, time.Second)
+       for i := 0; i < b.N; i++ {
+               fmt.Fprintf(ioutil.Discard, "%s", rot)
+       }
+}
diff --git a/metrics/metrics.go b/metrics/metrics.go
new file mode 100644 (file)
index 0000000..89f2f00
--- /dev/null
@@ -0,0 +1,191 @@
+// Package metrics provides convenient facilities to record
+// on-line high-level performance metrics.
+package metrics
+
+import (
+       "bytes"
+       "encoding/json"
+       "expvar"
+       "fmt"
+       "sync"
+       "time"
+
+       "github.com/codahale/hdrhistogram"
+)
+
+// Period is the size of a RotatingLatency bucket.
+// Each RotatingLatency will rotate once per Period.
+const Period = time.Minute
+
+var (
+       rotatingLatenciesMu sync.Mutex
+       // rotatingLatencies holds every recorder ever created; the
+       // goroutine started in init rotates each one once per Period.
+       rotatingLatencies   []*RotatingLatency
+       latencyExpvar       = expvar.NewMap("latency")
+)
+
+// PublishLatency publishes rl as an expvar inside the
+// global latency map (which is itself published under
+// the key "latency"). The map renders rl on demand via
+// its String method.
+func PublishLatency(key string, rl *RotatingLatency) {
+       latencyExpvar.Set(key, rl)
+}
+
+// A Latency records information about the aggregate latency
+// of an operation over time.
+// Internally it holds an HDR histogram (to two significant figures;
+// see NewLatency) and a counter of attempts to record a value
+// greater than the histogram's max.
+type Latency struct {
+       limit time.Duration // readonly
+
+       time  time.Time // rotation timestamp; set by RotatingLatency.rotate when the bucket is retired
+       hdr   hdrhistogram.Histogram
+       nover int           // how many values were over limit
+       max   time.Duration // max recorded value (can be over limit)
+}
+
+// NewLatency returns a new latency histogram with the given
+// duration limit and with two significant figures of precision.
+func NewLatency(limit time.Duration) *Latency {
+       return &Latency{
+               hdr:   *hdrhistogram.New(0, int64(limit), 2),
+               limit: limit,
+       }
+}
+
+// Record attempts to record a duration in the histogram.
+// If d is greater than the max allowed duration,
+// it increments a counter instead.
+func (l *Latency) Record(d time.Duration) {
+       if d > l.max {
+               l.max = d
+       }
+       if d > l.limit {
+               l.nover++
+       } else {
+               l.hdr.RecordValue(int64(d)) // error discarded; d <= limit is within the histogram range for non-negative d
+       }
+}
+
+// Reset resets l to its original empty state.
+// NOTE(review): max and time are not cleared here, so max persists
+// across rotations — confirm whether that is intended.
+func (l *Latency) Reset() {
+       l.hdr.Reset()
+       l.nover = 0
+}
+
+// String returns l as a JSON string.
+// This makes it suitable for use as an expvar.Val.
+func (l *Latency) String() string {
+       var b bytes.Buffer
+       fmt.Fprintf(&b, `{"Histogram":`)
+       // Marshal error deliberately ignored: Export yields plain
+       // ints/slices that cannot fail to encode.
+       h, _ := json.Marshal((&l.hdr).Export()) // #nosec
+       b.Write(h)
+       // Timestamp is the bucket's rotation time (the zero Time's Unix
+       // value until the bucket has been rotated out at least once).
+       fmt.Fprintf(&b, `,"Over":%d,"Timestamp":%d,"Max":%d}`, l.nover, l.time.Unix(), l.max)
+       return b.String()
+}
+
+// A RotatingLatency holds a rotating circular buffer of Latency objects,
+// that rotates once per Period time.
+// It can be used as an expvar Val.
+// Its exported methods are safe to call concurrently.
+type RotatingLatency struct {
+       mu  sync.Mutex
+       l   []Latency // circular buffer of buckets
+       n   int       // total number of rotations so far
+       cur *Latency  // bucket currently recording; points into l
+}
+
+// NewRotatingLatency returns a new rotating latency recorder
+// with n buckets of history. The recorder is registered globally
+// and rotated once per Period by the goroutine started in init;
+// it is never unregistered.
+func NewRotatingLatency(n int, max time.Duration) *RotatingLatency {
+       r := &RotatingLatency{
+               l: make([]Latency, n),
+       }
+       for i := range r.l {
+               r.l[i] = *NewLatency(max)
+       }
+       // Initial rotation makes r.cur non-nil so Record works immediately.
+       r.rotate()
+       rotatingLatenciesMu.Lock()
+       rotatingLatencies = append(rotatingLatencies, r)
+       rotatingLatenciesMu.Unlock()
+       return r
+}
+
+// Record attempts to record a duration in the current Latency in r.
+// If d is greater than the max allowed duration,
+// it increments a counter instead.
+// It is safe to call concurrently with rotation.
+func (r *RotatingLatency) Record(d time.Duration) {
+       r.mu.Lock()
+       r.cur.Record(d)
+       r.mu.Unlock()
+}
+
+// RecordSince records the duration elapsed since t0
+// in the current Latency bucket of r.
+func (r *RotatingLatency) RecordSince(t0 time.Time) {
+       r.Record(time.Since(t0))
+}
+
+// rotate stamps the outgoing bucket with the rotation time,
+// advances to the next bucket in the ring, and clears it for reuse.
+func (r *RotatingLatency) rotate() {
+       r.mu.Lock()
+       defer r.mu.Unlock()
+       if r.cur != nil {
+               r.cur.time = time.Now()
+       }
+       r.n++
+       r.cur = &r.l[r.n%len(r.l)]
+       r.cur.Reset()
+}
+
+// String returns r as a JSON string.
+// This makes it suitable for use as an expvar.Val.
+//
+// Example:
+//
+//  {
+//      "NumRot": 204,
+//      "Buckets": [
+//          {
+//              "Over": 4,
+//              "Histogram": {
+//                  "LowestTrackableValue": 0,
+//                  "HighestTrackableValue": 1000000000,
+//                  "SignificantFigures": 2,
+//                  "Counts": [2,0,15,...]
+//              }
+//          },
+//          ...
+//      ]
+//  }
+//
+// Note that the last bucket is actively recording values.
+// To collect complete and accurate data over a long time,
+// store the next-to-last bucket after each rotation.
+// The last bucket is only useful for a "live" view
+// with finer granularity than the rotation period (which is one minute).
+func (r *RotatingLatency) String() string {
+       r.mu.Lock()
+       defer r.mu.Unlock()
+       var b bytes.Buffer
+       fmt.Fprintf(&b, `{"Buckets":[`)
+       for i := range r.l {
+               if i > 0 {
+                       b.WriteByte(',')
+               }
+               // Emit buckets oldest first: iteration starts at the slot
+               // after the current one and ends on the actively-recording
+               // bucket at index r.n%len(r.l).
+               j := (r.n + i + 1) % len(r.l)
+               fmt.Fprintf(&b, "%s", &r.l[j])
+       }
+       fmt.Fprintf(&b, `],"NumRot":%d}`, r.n)
+       return b.String()
+}
+
+func init() {
+       // Background rotator: once per Period, rotate every recorder ever
+       // created. The goroutine runs for the life of the process; the
+       // time.Tick ticker is never stopped, which is acceptable here.
+       // The slice header is copied under the lock so rotation itself
+       // happens without holding rotatingLatenciesMu.
+       go func() {
+               for range time.Tick(Period) {
+                       rotatingLatenciesMu.Lock()
+                       a := rotatingLatencies
+                       rotatingLatenciesMu.Unlock()
+                       for _, rot := range a {
+                               rot.rotate()
+                       }
+               }
+       }()
+}
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
new file mode 100644 (file)
index 0000000..ca294d5
--- /dev/null
@@ -0,0 +1,64 @@
+package metrics
+
+import (
+       "encoding/json"
+       "reflect"
+       "testing"
+       "time"
+
+       "github.com/codahale/hdrhistogram"
+)
+
+// TestRotString pins the exact JSON rendering of a freshly created
+// RotatingLatency: one rotation has happened (in NewRotatingLatency)
+// and both buckets are empty.
+func TestRotString(t *testing.T) {
+       rot := NewRotatingLatency(2, time.Second)
+       // Replace the buckets so the two histograms are distinguishable
+       // in the output (different LowestTrackableValue, 1 sig fig).
+       rot.l[0].hdr = *hdrhistogram.New(0, int64(time.Second), 1)
+       rot.l[1].hdr = *hdrhistogram.New(1, int64(time.Second), 1) // "current"
+
+       // Timestamp -62135596800 is the Unix value of the zero time.Time:
+       // neither bucket has been rotated out yet.
+       want := `{
+               "NumRot": 1,
+               "Buckets": [
+                       {
+                               "Max": 0,
+                               "Over": 0,
+                               "Timestamp": -62135596800,
+                               "Histogram": {
+                                       "LowestTrackableValue": 0,
+                                       "HighestTrackableValue": 1000000000,
+                                       "SignificantFigures": 1,
+                                       "Counts": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+                               }
+                       },
+                       {
+                               "Max": 0,
+                               "Over": 0,
+                               "Timestamp": -62135596800,
+                               "Histogram": {
+                                       "LowestTrackableValue": 1,
+                                       "HighestTrackableValue": 1000000000,
+                                       "SignificantFigures": 1,
+                                       "Counts": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+                               }
+                       }
+               ]
+       }`
+
+       got := rot.String()
+       if !jsonIsEqual(t, got, want) {
+               t.Errorf("%#v.String() = %#q want %#q", rot, got, want)
+       }
+}
+
+// jsonIsEqual reports whether a and b decode to the same JSON value,
+// ignoring key order and whitespace. It fails the test immediately
+// if either string is not valid JSON.
+func jsonIsEqual(t *testing.T, a, b string) bool {
+       var av, bv interface{}
+
+       err := json.Unmarshal([]byte(a), &av)
+       if err != nil {
+               t.Fatal(err, a)
+       }
+       err = json.Unmarshal([]byte(b), &bv)
+       if err != nil {
+               t.Fatal(err, b)
+       }
+
+       return reflect.DeepEqual(av, bv)
+}