
dm persistent data: add threshold callback to space map
[uclinux-h8/linux.git] drivers/md/persistent-data/dm-space-map-metadata.c
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map.h"
#include "dm-space-map-common.h"
#include "dm-space-map-metadata.h"

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map metadata"

/*----------------------------------------------------------------*/

/*
 * Space map interface.
 *
 * The low level disk format is written using the standard btree and
 * transaction manager.  This means that performing disk operations may
 * cause us to recurse into the space map in order to allocate new blocks.
 * For this reason we have a pool of pre-allocated blocks large enough to
 * service any metadata_ll_disk operation.
 */

/*
 * FIXME: we should calculate this based on the size of the device.
 * Only the metadata space map needs this functionality.
 */
#define MAX_RECURSIVE_ALLOCATIONS 1024

enum block_op_type {
        BOP_INC,
        BOP_DEC
};

struct block_op {
        enum block_op_type type;
        dm_block_t block;
};

struct sm_metadata {
        struct dm_space_map sm;

        struct ll_disk ll;
        struct ll_disk old_ll;

        dm_block_t begin;

        unsigned recursion_count;
        unsigned allocated_this_transaction;
        unsigned nr_uncommitted;
        struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
};
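
/*
 * A summary of the fields above, inferred from how they are used later in
 * this file (descriptive only, not authoritative documentation):
 *
 *   ll        - the reference counts being built up in the current
 *               transaction.
 *   old_ll    - a copy of ll taken at the last commit (sm_metadata_commit());
 *               sm_metadata_get_nr_free() and sm_metadata_new_block_() work
 *               against this copy, so blocks freed during the current
 *               transaction are not handed out again before the commit.
 *   begin     - cursor for the next free-block search; reset to 0 on commit.
 *   recursion_count, nr_uncommitted, uncommitted[] - while a low level
 *               operation is in flight, nested inc/dec requests are queued
 *               via add_bop() and replayed when the outermost call unwinds
 *               (see in() and out() below).
 *   allocated_this_transaction - number of new_block allocations since the
 *               last commit, subtracted in sm_metadata_get_nr_free().
 */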

static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
        struct block_op *op;

        if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
                DMERR("too many recursive allocations");
                return -ENOMEM;
        }

        op = smm->uncommitted + smm->nr_uncommitted++;
        op->type = type;
        op->block = b;

        return 0;
}

static int commit_bop(struct sm_metadata *smm, struct block_op *op)
{
        int r = 0;
        enum allocation_event ev;

        switch (op->type) {
        case BOP_INC:
                r = sm_ll_inc(&smm->ll, op->block, &ev);
                break;

        case BOP_DEC:
                r = sm_ll_dec(&smm->ll, op->block, &ev);
                break;
        }

        return r;
}

static void in(struct sm_metadata *smm)
{
        smm->recursion_count++;
}

static int out(struct sm_metadata *smm)
{
        int r = 0;

        /*
         * If we're not recursing then very bad things are happening.
         */
        if (!smm->recursion_count) {
                DMERR("lost track of recursion depth");
                return -ENOMEM;
        }

        if (smm->recursion_count == 1 && smm->nr_uncommitted) {
                while (smm->nr_uncommitted && !r) {
                        smm->nr_uncommitted--;
                        r = commit_bop(smm, smm->uncommitted +
                                       smm->nr_uncommitted);
                        if (r)
                                break;
                }
        }

        smm->recursion_count--;

        return r;
}
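
/*
 * The accessors further down bracket each low level operation with
 * in()/out(): inc/dec requests triggered while the operation is in flight
 * are queued with add_bop() and only applied by out() once the outermost
 * call unwinds.  A minimal sketch of the pattern, mirroring
 * sm_metadata_inc_block() below (names are those defined in this file):
 *
 *	if (recursing(smm))
 *		r = add_bop(smm, BOP_INC, b);
 *	else {
 *		in(smm);
 *		r = sm_ll_inc(&smm->ll, b, &ev);
 *		r2 = out(smm);
 *	}
 *	return combine_errors(r, r2);
 */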

/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that from
 * out().
 */
static int combine_errors(int r1, int r2)
{
        return r1 ? r1 : r2;
}

static int recursing(struct sm_metadata *smm)
{
        return smm->recursion_count;
}

static void sm_metadata_destroy(struct dm_space_map *sm)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        kfree(smm);
}

static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks;

        return 0;
}

static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
                 smm->allocated_this_transaction;

        return 0;
}

static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t *result)
{
        int r, i;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        unsigned adjustment = 0;

        /*
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
        for (i = 0; i < smm->nr_uncommitted; i++) {
                struct block_op *op = smm->uncommitted + i;

                if (op->block != b)
                        continue;

                switch (op->type) {
                case BOP_INC:
                        adjustment++;
                        break;

                case BOP_DEC:
                        adjustment--;
                        break;
                }
        }

        r = sm_ll_lookup(&smm->ll, b, result);
        if (r)
                return r;

        *result += adjustment;

        return 0;
}

static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
                                              dm_block_t b, int *result)
{
        int r, i, adjustment = 0;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        uint32_t rc;

        /*
         * We may have some uncommitted adjustments to add.  This list
         * should always be really short.
         */
        for (i = 0; i < smm->nr_uncommitted; i++) {
                struct block_op *op = smm->uncommitted + i;

                if (op->block != b)
                        continue;

                switch (op->type) {
                case BOP_INC:
                        adjustment++;
                        break;

                case BOP_DEC:
                        adjustment--;
                        break;
                }
        }

        if (adjustment > 1) {
                *result = 1;
                return 0;
        }

        r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
        if (r)
                return r;

        if (rc == 3)
                /*
                 * We err on the side of caution, and always return true.
                 */
                *result = 1;
        else
                *result = rc + adjustment > 1;

        return 0;
}

static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
                                 uint32_t count)
{
        int r, r2;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (smm->recursion_count) {
                DMERR("cannot recurse set_count()");
                return -EINVAL;
        }

        in(smm);
        r = sm_ll_insert(&smm->ll, b, count, &ev);
        r2 = out(smm);

        return combine_errors(r, r2);
}

static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (recursing(smm))
                r = add_bop(smm, BOP_INC, b);
        else {
                in(smm);
                r = sm_ll_inc(&smm->ll, b, &ev);
                r2 = out(smm);
        }

        return combine_errors(r, r2);
}

static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        if (recursing(smm))
                r = add_bop(smm, BOP_DEC, b);
        else {
                in(smm);
                r = sm_ll_dec(&smm->ll, b, &ev);
                r2 = out(smm);
        }

        return combine_errors(r, r2);
}

static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
        int r, r2 = 0;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
        if (r)
                return r;

        smm->begin = *b + 1;

        if (recursing(smm))
                r = add_bop(smm, BOP_INC, *b);
        else {
                in(smm);
                r = sm_ll_inc(&smm->ll, *b, &ev);
                r2 = out(smm);
        }

        if (!r)
                smm->allocated_this_transaction++;

        return combine_errors(r, r2);
}

static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        int r = sm_metadata_new_block_(sm, b);
        if (r)
                DMERR("unable to allocate new metadata block");
        return r;
}

static int sm_metadata_commit(struct dm_space_map *sm)
{
        int r;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_commit(&smm->ll);
        if (r)
                return r;

        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        smm->begin = 0;
        smm->allocated_this_transaction = 0;

        return 0;
}

static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
        *result = sizeof(struct disk_sm_root);

        return 0;
}

static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        struct disk_sm_root root_le;

        root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
        root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
        root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
        root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);

        if (max < sizeof(root_le))
                return -ENOSPC;

        memcpy(where_le, &root_le, sizeof(root_le));

        return 0;
}

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);

static struct dm_space_map ops = {
        .destroy = sm_metadata_destroy,
        .extend = sm_metadata_extend,
        .get_nr_blocks = sm_metadata_get_nr_blocks,
        .get_nr_free = sm_metadata_get_nr_free,
        .get_count = sm_metadata_get_count,
        .count_is_more_than_one = sm_metadata_count_is_more_than_one,
        .set_count = sm_metadata_set_count,
        .inc_block = sm_metadata_inc_block,
        .dec_block = sm_metadata_dec_block,
        .new_block = sm_metadata_new_block,
        .commit = sm_metadata_commit,
        .root_size = sm_metadata_root_size,
        .copy_root = sm_metadata_copy_root,
        .register_threshold_callback = NULL
};

/*----------------------------------------------------------------*/

/*
 * When a new space map is created that manages its own space, we use
 * this tiny bootstrap allocator.
 */
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}

static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        DMERR("bootstrap doesn't support extend");

        return -EINVAL;
}

static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks;

        return 0;
}

static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *count = smm->ll.nr_blocks - smm->begin;

        return 0;
}

static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t *result)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        *result = (b < smm->begin) ? 1 : 0;

        return 0;
}

static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
                                               dm_block_t b, int *result)
{
        *result = 0;

        return 0;
}

static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t count)
{
        DMERR("bootstrap doesn't support set_count");

        return -EINVAL;
}

static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        /*
         * We know the entire device is unused.
         */
        if (smm->begin == smm->ll.nr_blocks)
                return -ENOSPC;

        *b = smm->begin++;

        return 0;
}

static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        return add_bop(smm, BOP_INC, b);
}

static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        return add_bop(smm, BOP_DEC, b);
}

static int sm_bootstrap_commit(struct dm_space_map *sm)
{
        return 0;
}

static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
        DMERR("bootstrap doesn't support root_size");

        return -EINVAL;
}

static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
                                  size_t max)
{
        DMERR("bootstrap doesn't support copy_root");

        return -EINVAL;
}

static struct dm_space_map bootstrap_ops = {
        .destroy = sm_bootstrap_destroy,
        .extend = sm_bootstrap_extend,
        .get_nr_blocks = sm_bootstrap_get_nr_blocks,
        .get_nr_free = sm_bootstrap_get_nr_free,
        .get_count = sm_bootstrap_get_count,
        .count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
        .set_count = sm_bootstrap_set_count,
        .inc_block = sm_bootstrap_inc_block,
        .dec_block = sm_bootstrap_dec_block,
        .new_block = sm_bootstrap_new_block,
        .commit = sm_bootstrap_commit,
        .root_size = sm_bootstrap_root_size,
        .copy_root = sm_bootstrap_copy_root,
        .register_threshold_callback = NULL
};

/*----------------------------------------------------------------*/

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        int r, i;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
        dm_block_t old_len = smm->ll.nr_blocks;

        /*
         * Flick into a mode where all blocks get allocated in the new area.
         */
        smm->begin = old_len;
        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

        /*
         * Extend.
         */
        r = sm_ll_extend(&smm->ll, extra_blocks);

        /*
         * Switch back to normal behaviour.
         */
        memcpy(&smm->sm, &ops, sizeof(smm->sm));
        for (i = old_len; !r && i < smm->begin; i++)
                r = sm_ll_inc(&smm->ll, i, &ev);

        return r;
}
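
/*
 * A rough reading of the sequence above: sm_ll_extend() itself needs
 * metadata blocks for new bitmaps and index entries, and asking the space
 * map that is in the middle of being extended would recurse.  With
 * bootstrap_ops installed, those allocations come straight from the newly
 * added region (sm_bootstrap_new_block() just advances smm->begin) and any
 * nested inc/dec requests are queued with add_bop().  Once the normal ops
 * are restored, the loop above bumps the reference count of each block
 * handed out while bootstrapping, so the on-disk counts end up consistent.
 */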

/*----------------------------------------------------------------*/

struct dm_space_map *dm_sm_metadata_init(void)
{
        struct sm_metadata *smm;

        smm = kmalloc(sizeof(*smm), GFP_KERNEL);
        if (!smm)
                return ERR_PTR(-ENOMEM);

        memcpy(&smm->sm, &ops, sizeof(smm->sm));

        return &smm->sm;
}

int dm_sm_metadata_create(struct dm_space_map *sm,
                          struct dm_transaction_manager *tm,
                          dm_block_t nr_blocks,
                          dm_block_t superblock)
{
        int r;
        dm_block_t i;
        enum allocation_event ev;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        smm->begin = superblock + 1;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;

        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

        r = sm_ll_new_metadata(&smm->ll, tm);
        if (r)
                return r;

        r = sm_ll_extend(&smm->ll, nr_blocks);
        if (r)
                return r;

        memcpy(&smm->sm, &ops, sizeof(smm->sm));

        /*
         * Now we need to update the newly created data structures with the
         * allocated blocks that they were built from.
         */
        for (i = superblock; !r && i < smm->begin; i++)
                r = sm_ll_inc(&smm->ll, i, &ev);

        if (r)
                return r;

        return sm_metadata_commit(sm);
}

int dm_sm_metadata_open(struct dm_space_map *sm,
                        struct dm_transaction_manager *tm,
                        void *root_le, size_t len)
{
        int r;
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

        r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
        if (r)
                return r;

        smm->begin = 0;
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;

        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        return 0;
}
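
/*
 * Hypothetical usage sketch (not part of this file): a client such as the
 * thin provisioning metadata code obtains the space map before the
 * transaction manager exists, then finishes construction once the block
 * manager and transaction manager are set up.  The dm_sm_* calls are the
 * inline wrappers from dm-space-map.h; the tm setup and the
 * SUPERBLOCK_LOCATION / nr_metadata_blocks values are assumed, not taken
 * from this file.
 *
 *	struct dm_space_map *sm = dm_sm_metadata_init();
 *	if (IS_ERR(sm))
 *		return PTR_ERR(sm);
 *
 *	// ... create a dm_block_manager and dm_transaction_manager 'tm' ...
 *
 *	r = dm_sm_metadata_create(sm, tm, nr_metadata_blocks, SUPERBLOCK_LOCATION);
 *	// or, when reopening existing metadata:
 *	// r = dm_sm_metadata_open(sm, tm, root_le, root_len);
 *
 *	r = dm_sm_new_block(sm, &b);	// allocate a metadata block
 *	r = dm_sm_commit(sm);		// persist the reference counts
 *	dm_sm_destroy(sm);
 */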