/*
 * block/blk-sysfs.c - Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}
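
/*
 * Illustration (hypothetical device name): writing "128" to
 * /sys/block/sda/queue/nr_requests arrives here as page = "128\n";
 * queue_var_store() parses the leading decimal digits with
 * simple_strtoul() and reports the whole write as consumed.  Trailing
 * non-digit input is silently ignored, and range validation, if any,
 * is left to each individual ->store handler.
 */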

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        struct request_list *rl = &q->rq;
        unsigned long nr;
        int ret;

        if (!q->request_fn)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);

        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_SYNC);

        if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_ASYNC);
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);

        if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_SYNC);
        } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
                blk_clear_queue_full(q, BLK_RW_SYNC);
                wake_up(&rl->wait[BLK_RW_SYNC]);
        }

        if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_ASYNC);
        } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
                blk_clear_queue_full(q, BLK_RW_ASYNC);
                wake_up(&rl->wait[BLK_RW_ASYNC]);
        }
        spin_unlock_irq(q->queue_lock);
        return ret;
}
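
/*
 * After nr_requests changes, the congestion and "queue full" state must
 * be recomputed for both the sync and async request lists: shrinking
 * the pool can make the queue instantly congested or full, while
 * growing it may leave sleepers on rl->wait[] that would otherwise
 * never be woken, hence the wake_up() on the transition out of the
 * full state.
 */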

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}
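
/*
 * The shifts above convert between pages and kilobytes:
 * PAGE_CACHE_SHIFT - 10 is log2(page size / 1024), so with the common
 * 4 KB page size (PAGE_CACHE_SHIFT == 12) the left shift by 2 turns
 * ra_pages into KB and the right shift turns KB back into pages.
 */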

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return queue_var_show(queue_max_segment_size(q), page);

        return queue_var_show(PAGE_CACHE_SIZE, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}
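
/*
 * Sectors are 512 bytes, so ">> 1" converts a sector count to KB and
 * "<< 1" converts KB back to sectors ("<< 9" above converts sectors to
 * bytes for discard_max_bytes).  max_sectors_kb is confined to the
 * range [one page, max_hw_sectors_kb]: it may never exceed what the
 * hardware reports, nor drop below a single page of I/O.
 */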

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
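
/*
 * Each QUEUE_SYSFS_BIT_FNS() invocation expands to a show/store pair
 * for one queue flag; e.g. QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1)
 * generates queue_show_nonrot() and queue_store_nonrot().  The neg
 * argument lets the sysfs polarity differ from the flag's: "rotational"
 * reads 1 for rotating media precisely because the underlying flag is
 * NONROT.
 */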

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
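
/*
 * "nomerges" is a three-state knob rather than a bit: 0 enables all
 * merging, 1 (NOXMERGES) disables only the extended merge lookups while
 * still allowing the cheap one-hit merge attempts, and 2 (NOMERGES)
 * disables merging entirely.  The show side reconstructs that value by
 * packing the two flags into bits 1 and 0.
 */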

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

        return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        spin_lock_irq(q->queue_lock);
        if (val)
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
        else
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}
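
/*
 * QUEUE_FLAG_SAME_COMP steers a request's completion back to the CPU
 * that submitted it, which relies on the generic SMP function-call
 * helpers to raise the block softirq on the remote CPU; without
 * CONFIG_USE_GENERIC_SMP_HELPERS the store is rejected with -EINVAL.
 */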

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        NULL,
};
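
/*
 * default_attrs must stay NULL-terminated: the kobject core walks this
 * array when the queue kobject is added and creates one file per entry
 * under /sys/block/<disk>/queue/.  Note that "hw_sector_size" is a
 * legacy alias that reuses queue_logical_block_size_show().
 */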

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
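
/*
 * Both dispatchers use the same container_of() trick twice: the
 * struct attribute is embedded in a queue_sysfs_entry (recovering the
 * show/store callbacks) and the kobject is embedded in the
 * request_queue itself.  q->sysfs_lock serializes attribute access
 * against queue teardown, and the QUEUE_FLAG_DEAD check turns accesses
 * to a dying queue into -ENOENT instead of touching freed state.
 */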

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;

        blk_sync_queue(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(blk_requestq_cachep, q);
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);

        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0)
                return ret;

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(disk_to_dev(disk));
                return ret;
        }

        return 0;
}
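
/*
 * Registration sketch (hypothetical disk "sda"): blk_register_queue()
 * hangs the queue kobject off the disk's device kobject, so the
 * attributes above appear as /sys/block/sda/queue/nr_requests,
 * /sys/block/sda/queue/scheduler, and so on.  Queues without a
 * request_fn (bio-based make_request drivers) skip elevator
 * registration, since they have no I/O scheduler to expose.
 */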

void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}