[bytom/vapor.git] / blockchain/pseudohsm/keycache_test.go
package pseudohsm

import (
        "fmt"
        "io/ioutil"
        "math/rand"
        "os"
        "path/filepath"
        "reflect"
        "sort"
        "testing"
        "time"

        "github.com/cespare/cp"
        "github.com/davecgh/go-spew/spew"
        edchainkd "github.com/vapor/crypto/ed25519/chainkd"
)

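// Fixture keys stored under testdata/keystore; the watcher tests copy these files into temporary keystore directories.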
var (
        cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore"))
        cachetestKeys   = []XPub{
                {
                        Alias: "langyu",
                        File:  filepath.Join(cachetestDir, "UTC--2017-09-13T07-11-07.863320100Z--bm1pktmny6q69dlqulja2p2ja28k2vd6wvqpk5r76a"),
                },
                {
                        Alias: "aaatest",
                        File:  filepath.Join(cachetestDir, "aaa"),
                },
                {
                        Alias: "zzztest",
                        File:  filepath.Join(cachetestDir, "zzz"),
                },
        }
)

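// TestWatchNewFile checks that the watcher picks up key files copied into the keystore directory after the cache has started watching it.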
func TestWatchNewFile(t *testing.T) {
        t.Parallel()

        dir, kc := tmpManager(t)
        // defer os.RemoveAll(dir)

        // Ensure the watcher is started before adding any files.
        kc.keys()
        time.Sleep(200 * time.Millisecond)
        // Move in the files.
        wantKeystores := make([]XPub, len(cachetestKeys))
        for i := range cachetestKeys {
                a := cachetestKeys[i]
                a.File = filepath.Join(dir, filepath.Base(a.File))
                wantKeystores[i] = a
                if err := cp.CopyFile(a.File, cachetestKeys[i].File); err != nil {
                        t.Fatal(err)
                }
        }

        // kc should see the keys.
        var list []XPub
        for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 {
                list = kc.keys()
                if reflect.DeepEqual(list, wantKeystores) {
                        return
                }
                time.Sleep(d)
        }
        t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantKeystores))
}

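// TestWatchNoDir checks that the cache copes with a keystore directory that is created only after watching has started.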
func TestWatchNoDir(t *testing.T) {
        t.Parallel()

        // Create the cache but not the directory that it watches.
        rand.Seed(time.Now().UnixNano())
        dir := filepath.Join(os.TempDir(), fmt.Sprintf("bytom-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
        kc := newKeyCache(dir)
        list := kc.keys()
        if len(list) > 0 {
                t.Error("initial key list not empty:", list)
        }
        time.Sleep(100 * time.Millisecond)

        // Create the directory and copy a key file into it.
        os.MkdirAll(dir, 0700)
        defer os.RemoveAll(dir)
        file := filepath.Join(dir, "aaa")
        if err := cp.CopyFile(file, cachetestKeys[0].File); err != nil {
                t.Fatal(err)
        }

        // kc should see the key.
        wantKeys := []XPub{cachetestKeys[0]}
        wantKeys[0].File = file
        for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
                list = kc.keys()
                if reflect.DeepEqual(list, wantKeys) {
                        return
                }
                time.Sleep(d)
        }
        t.Errorf("\ngot  %v\nwant %v", list, wantKeys)
}

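// TestCacheInitialReload checks that a fresh cache loads the fixture keys from testdata/keystore on first access.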
func TestCacheInitialReload(t *testing.T) {
        cache := newKeyCache(cachetestDir)
        keys := cache.keys()
        if !reflect.DeepEqual(keys, cachetestKeys) {
                t.Fatalf("got initial keys: %s\nwant %s", spew.Sdump(keys), spew.Sdump(cachetestKeys))
        }
}

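// TestCacheAddDeleteOrder checks that add keeps the key list sorted by file name, ignores duplicates, and that delete removes entries.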
func TestCacheAddDeleteOrder(t *testing.T) {
        cache := newKeyCache("testdata/no-such-dir")
        cache.watcher.running = true // prevent unexpected reloads
        r := rand.New(rand.NewSource(time.Now().UnixNano()))

        keys := []XPub{
                {
                        Alias: "bm1pvheagygs9d72stp79u9vduhmdyjpnvud0y89y7",
                        File:  "-309830980",
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1pyk3qny8gzem6p4fx8t5d344tnldguv8lvx2aww",
                        File:  "ggg",
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1p6s0ckxrudy7hqht4n5fhcs4gp69krv3c84jn9x",
                        File:  "zzzzzz-the-very-last-one.keyXXX",
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1p7xkfhsw50y44t63mk0dfxxkvuyg6t3s0r6xs54",
                        File:  "SOMETHING.key",
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1peu9ql7x8c7aeca60j40sg5w4kylpf7l3jmau0g",
                        File:  "UTC--2016-03-22T12-57-55.920751759Z--bm1peu9ql7x8c7aeca60j40sg5w4kylpf7l3jmau0g",
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1p0s68e4ggp0vy5ue2lztsxvl2smpnqp9al8jyvh",
                        File:  "aaa",
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1pjq8ttfl7ppqtcc5qqff0s45p7ew9l9pjmlu5xw",
                        File:  "zzz",
                        XPub:  tmpEdPubkeys(t, r),
                },
        }
        for _, a := range keys {
                cache.add(a)
        }
        // Add some of them twice to check that they don't get reinserted.
        cache.add(keys[0])
        cache.add(keys[2])

        // Check that the key list is sorted by filename.
        wantKeys := make([]XPub, len(keys))
        copy(wantKeys, keys)
        sort.Sort(keysByFile(wantKeys))
        list := cache.keys()

        if !reflect.DeepEqual(list, wantKeys) {
                t.Fatalf("got keys: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantKeys))
        }

        for _, a := range keys {
                if !cache.hasKey(a.XPub) {
                        t.Errorf("expected hasKey(%x) to return true", a.XPub)
                }
        }
        // Delete a few keys from the cache.
        for i := 0; i < len(keys); i += 2 {
                cache.delete(wantKeys[i])
        }
        cache.delete(XPub{Alias: "bm1pug2xpcvpzepdf0paulnndhpxtpjvre8ypd0jtj", File: "something", XPub: tmpEdPubkeys(t, r)})

        // Check content again after deletion.
        wantKeysAfterDelete := []XPub{
                wantKeys[1],
                wantKeys[3],
                wantKeys[5],
        }
        list = cache.keys()
        if !reflect.DeepEqual(list, wantKeysAfterDelete) {
                t.Fatalf("got keys after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantKeysAfterDelete))
        }
        for _, a := range wantKeysAfterDelete {
                if !cache.hasKey(a.XPub) {
                        t.Errorf("expected hasKey(%x) to return true", a.XPub)
                }
        }
        if cache.hasKey(wantKeys[0].XPub) {
                t.Errorf("expected hasKey(%x) to return false", wantKeys[0].XPub)
        }
}

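// TestCacheFind exercises lookup by xpub, by full file path, by base name, and the ambiguous and no-match error cases.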
func TestCacheFind(t *testing.T) {
        dir := filepath.Join("testdata", "dir")
        cache := newKeyCache(dir)
        cache.watcher.running = true // prevent unexpected reloads
        r := rand.New(rand.NewSource(time.Now().UnixNano()))

        dup := tmpEdPubkeys(t, r)
        keys := []XPub{
                {
                        Alias: "bm1pmv9kg68j3edvqrv62lxllev4ugjv0zf6g5pwf6",
                        File:  filepath.Join(dir, "a.key"),
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1ptspg4x6kjjp642gdpzan0ynq9zr7z4m34nqpet",
                        File:  filepath.Join(dir, "b.key"),
                        XPub:  tmpEdPubkeys(t, r),
                },
                {
                        Alias: "bm1pmlpy0946zsvdg29v80gw0mkq2n0ghkg0fpmhav",
                        File:  filepath.Join(dir, "c.key"),
                        XPub:  dup,
                },
                {
                        Alias: "bm1pmlpy0946zsvdg29v80gw0mkq2n0ghkg0fpmhav",
                        File:  filepath.Join(dir, "c2.key"),
                        XPub:  dup,
                },
        }
        for _, a := range keys {
                cache.add(a)
        }

        nomatchKey := XPub{
                Alias: "bm1pu2vmgps4d9e3mrsuzp58w777apky4rjgn5rn9e",
                File:  filepath.Join(dir, "something"),
                XPub:  tmpEdPubkeys(t, r),
        }
        tests := []struct {
                Query      XPub
                WantResult XPub
                WantError  error
        }{
                // by xpub
                {Query: XPub{XPub: keys[0].XPub}, WantResult: keys[0]},
                // by file
                {Query: XPub{File: keys[0].File}, WantResult: keys[0]},
                // by basename
                {Query: XPub{File: filepath.Base(keys[0].File)}, WantResult: keys[0]},
                // by file and xpub
                {Query: keys[0], WantResult: keys[0]},
                // ambiguous xpub, tie resolved by file
                {Query: keys[2], WantResult: keys[2]},
                // ambiguous xpub error
                {
                        Query: XPub{XPub: keys[2].XPub},
                        WantError: &AmbiguousKeyError{
                                Pubkey:  xpubToString(keys[2].XPub),
                                Matches: []XPub{keys[2], keys[3]},
                        },
                },
                // no match error
                {Query: nomatchKey, WantError: ErrLoadKey},
                {Query: XPub{File: nomatchKey.File}, WantError: ErrLoadKey},
                {Query: XPub{File: filepath.Base(nomatchKey.File)}, WantError: ErrLoadKey},
                {Query: XPub{XPub: nomatchKey.XPub}, WantError: ErrLoadKey},
        }
        for i, test := range tests {
                a, err := cache.find(test.Query)
                if !reflect.DeepEqual(err, test.WantError) {
                        t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError)
                        continue
                }
                if a != test.WantResult {
                        t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult)
                        continue
                }
        }
}

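// tmpManager creates a temporary keystore directory and returns it together with a keyCache watching it.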
func tmpManager(t *testing.T) (string, *keyCache) {
        d, err := ioutil.TempDir("", "bytom-keystore-test")
        if err != nil {
                t.Fatal(err)
        }
        return d, newKeyCache(d)
}

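// tmpEdPubkeys fills an edchainkd.XPub with pseudo-random printable bytes; the tests only need distinct cache entries, not valid key material.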
func tmpEdPubkeys(t *testing.T, r *rand.Rand) edchainkd.XPub {
        var xpub edchainkd.XPub
        pick := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
        bytes := []byte(pick)
        result := []byte{}
        for i := 0; i < 64; i++ {
                result = append(result, bytes[r.Intn(len(bytes))])
        }
        copy(xpub[:], result[:])
        return xpub
}