bytom/vapor.git / blockchain/pseudohsm/keycache_test.go
package pseudohsm

import (
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/cespare/cp"
	"github.com/davecgh/go-spew/spew"
	"github.com/vapor/crypto/ed25519/chainkd"
)

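// cachetestDir points at the fixture keystore; cachetestKeys mirrors the key
// files the cache tests expect to find there.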
var (
	cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore"))
	cachetestKeys   = []XPub{
		{
			Alias: "langyu",
			File:  filepath.Join(cachetestDir, "UTC--2017-09-13T07-11-07.863320100Z--bm1pktmny6q69dlqulja2p2ja28k2vd6wvqpk5r76a"),
		},
		{
			Alias: "aaatest",
			File:  filepath.Join(cachetestDir, "aaa"),
		},
		{
			Alias: "zzztest",
			File:  filepath.Join(cachetestDir, "zzz"),
		},
	}
)

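// TestWatchNewFile copies the fixture key files into a fresh keystore
// directory and checks that the watcher-backed cache picks them up.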
func TestWatchNewFile(t *testing.T) {
	t.Parallel()

	dir, kc := tmpManager(t)
	defer os.RemoveAll(dir)

	// Ensure the watcher is started before adding any files.
	kc.keys()
	time.Sleep(200 * time.Millisecond)
	// Move in the files.
	wantKeystores := make([]XPub, len(cachetestKeys))
	for i := range cachetestKeys {
		a := cachetestKeys[i]
		a.File = filepath.Join(dir, filepath.Base(a.File))
		wantKeystores[i] = a
		if err := cp.CopyFile(a.File, cachetestKeys[i].File); err != nil {
			t.Fatal(err)
		}
	}

	// kc should see the keys.
	var list []XPub
	for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 {
		list = kc.keys()
		if reflect.DeepEqual(list, wantKeystores) {
			return
		}
		time.Sleep(d)
	}
	t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantKeystores))
}

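// TestWatchNoDir starts the cache on a directory that does not exist yet,
// then creates it and checks that a copied key file is eventually noticed.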
func TestWatchNoDir(t *testing.T) {
	t.Parallel()

	// Create kc but not the directory that it watches.
	rand.Seed(time.Now().UnixNano())
	dir := filepath.Join(os.TempDir(), fmt.Sprintf("bytom-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
	kc := newKeyCache(dir)
	list := kc.keys()
	if len(list) > 0 {
		t.Error("initial key list not empty:", list)
	}
	time.Sleep(100 * time.Millisecond)

	// Create the directory and copy a key file into it.
	os.MkdirAll(dir, 0700)
	defer os.RemoveAll(dir)
	file := filepath.Join(dir, "aaa")
	if err := cp.CopyFile(file, cachetestKeys[0].File); err != nil {
		t.Fatal(err)
	}

	// kc should see the key.
	wantKeys := []XPub{cachetestKeys[0]}
	wantKeys[0].File = file
	for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
		list = kc.keys()
		if reflect.DeepEqual(list, wantKeys) {
			return
		}
		time.Sleep(d)
	}
	t.Errorf("\ngot  %v\nwant %v", list, wantKeys)
}

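// TestCacheInitialReload checks that a fresh cache loads the fixture keys
// from testdata/keystore on first access.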
func TestCacheInitialReload(t *testing.T) {
	cache := newKeyCache(cachetestDir)
	keys := cache.keys()
	if !reflect.DeepEqual(keys, cachetestKeys) {
		t.Fatalf("got initial keys: %s\nwant %s", spew.Sdump(keys), spew.Sdump(cachetestKeys))
	}
}

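// TestCacheAddDeleteOrder checks that added keys are kept sorted by file name,
// that duplicates are not reinserted, and that deletes remove the right entries.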
func TestCacheAddDeleteOrder(t *testing.T) {
	cache := newKeyCache("testdata/no-such-dir")
	cache.watcher.running = true // prevent unexpected reloads
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	keys := []XPub{
		{
			Alias: "bm1pvheagygs9d72stp79u9vduhmdyjpnvud0y89y7",
			File:  "-309830980",
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1pyk3qny8gzem6p4fx8t5d344tnldguv8lvx2aww",
			File:  "ggg",
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1p6s0ckxrudy7hqht4n5fhcs4gp69krv3c84jn9x",
			File:  "zzzzzz-the-very-last-one.keyXXX",
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1p7xkfhsw50y44t63mk0dfxxkvuyg6t3s0r6xs54",
			File:  "SOMETHING.key",
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1peu9ql7x8c7aeca60j40sg5w4kylpf7l3jmau0g",
			File:  "UTC--2016-03-22T12-57-55.920751759Z--bm1peu9ql7x8c7aeca60j40sg5w4kylpf7l3jmau0g",
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1p0s68e4ggp0vy5ue2lztsxvl2smpnqp9al8jyvh",
			File:  "aaa",
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1pjq8ttfl7ppqtcc5qqff0s45p7ew9l9pjmlu5xw",
			File:  "zzz",
			XPub:  tmpPubkeys(t, r),
		},
	}
	for _, a := range keys {
		cache.add(a)
	}
	// Add some of them twice to check that they don't get reinserted.
	cache.add(keys[0])
	cache.add(keys[2])

	// Check that the key list is sorted by filename.
	wantKeys := make([]XPub, len(keys))
	copy(wantKeys, keys)
	sort.Sort(keysByFile(wantKeys))
	list := cache.keys()

	if !reflect.DeepEqual(list, wantKeys) {
		t.Fatalf("got keys: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantKeys))
	}

	for _, a := range keys {
		if !cache.hasKey(a.XPub) {
			t.Errorf("expected hasKey(%x) to return true", a.XPub)
		}
	}
	// Delete a few keys from the cache.
	for i := 0; i < len(keys); i += 2 {
		cache.delete(wantKeys[i])
	}
	cache.delete(XPub{Alias: "bm1pug2xpcvpzepdf0paulnndhpxtpjvre8ypd0jtj", File: "something", XPub: tmpPubkeys(t, r)})

	// Check content again after deletion.
	wantKeysAfterDelete := []XPub{
		wantKeys[1],
		wantKeys[3],
		wantKeys[5],
	}
	list = cache.keys()
	if !reflect.DeepEqual(list, wantKeysAfterDelete) {
		t.Fatalf("got keys after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantKeysAfterDelete))
	}
	for _, a := range wantKeysAfterDelete {
		if !cache.hasKey(a.XPub) {
			t.Errorf("expected hasKey(%x) to return true", a.XPub)
		}
	}
	if cache.hasKey(wantKeys[0].XPub) {
		t.Errorf("expected hasKey(%x) to return false", wantKeys[0].XPub)
	}
}

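// TestCacheFind checks lookups by xpub, by file path, and by base name,
// including the ambiguous-xpub and no-match error cases.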
func TestCacheFind(t *testing.T) {
	dir := filepath.Join("testdata", "dir")
	cache := newKeyCache(dir)
	cache.watcher.running = true // prevent unexpected reloads
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	dup := tmpPubkeys(t, r)
	keys := []XPub{
		{
			Alias: "bm1pmv9kg68j3edvqrv62lxllev4ugjv0zf6g5pwf6",
			File:  filepath.Join(dir, "a.key"),
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1ptspg4x6kjjp642gdpzan0ynq9zr7z4m34nqpet",
			File:  filepath.Join(dir, "b.key"),
			XPub:  tmpPubkeys(t, r),
		},
		{
			Alias: "bm1pmlpy0946zsvdg29v80gw0mkq2n0ghkg0fpmhav",
			File:  filepath.Join(dir, "c.key"),
			XPub:  dup,
		},
		{
			Alias: "bm1pmlpy0946zsvdg29v80gw0mkq2n0ghkg0fpmhav",
			File:  filepath.Join(dir, "c2.key"),
			XPub:  dup,
		},
	}
	for _, a := range keys {
		cache.add(a)
	}

	nomatchKey := XPub{
		Alias: "bm1pu2vmgps4d9e3mrsuzp58w777apky4rjgn5rn9e",
		File:  filepath.Join(dir, "something"),
		XPub:  tmpPubkeys(t, r),
	}
	tests := []struct {
		Query      XPub
		WantResult XPub
		WantError  error
	}{
		// by xpub
		{Query: XPub{XPub: keys[0].XPub}, WantResult: keys[0]},
		// by file
		{Query: XPub{File: keys[0].File}, WantResult: keys[0]},
		// by basename
		{Query: XPub{File: filepath.Base(keys[0].File)}, WantResult: keys[0]},
		// by file and xpub
		{Query: keys[0], WantResult: keys[0]},
		// ambiguous xpub, tie resolved by file
		{Query: keys[2], WantResult: keys[2]},
		// ambiguous xpub error
		{
			Query: XPub{XPub: keys[2].XPub},
			WantError: &AmbiguousKeyError{
				Pubkey:  hex.EncodeToString(keys[2].XPub[:]),
				Matches: []XPub{keys[2], keys[3]},
			},
		},
		// no match error
		{Query: nomatchKey, WantError: ErrLoadKey},
		{Query: XPub{File: nomatchKey.File}, WantError: ErrLoadKey},
		{Query: XPub{File: filepath.Base(nomatchKey.File)}, WantError: ErrLoadKey},
		{Query: XPub{XPub: nomatchKey.XPub}, WantError: ErrLoadKey},
	}
	for i, test := range tests {
		a, err := cache.find(test.Query)
		if !reflect.DeepEqual(err, test.WantError) {
			t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError)
			continue
		}
		if a != test.WantResult {
			t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult)
			continue
		}
	}
}

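// tmpManager creates a temporary keystore directory and a key cache watching it.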
func tmpManager(t *testing.T) (string, *keyCache) {
	d, err := ioutil.TempDir("", "bytom-keystore-test")
	if err != nil {
		t.Fatal(err)
	}
	return d, newKeyCache(d)
}

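// tmpPubkeys returns a chainkd.XPub filled with random printable bytes, used
// to give test entries distinct public keys.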
func tmpPubkeys(t *testing.T, r *rand.Rand) chainkd.XPub {
	var xpub chainkd.XPub
	const pick = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	for i := range xpub {
		xpub[i] = pick[r.Intn(len(pick))]
	}
	return xpub
}