/****************************************************************************
 ****************************************************************************
 *** This header was automatically generated from a Linux kernel header
 *** of the same name, to make information necessary for userspace to
 *** call into the kernel available to libc. It contains only constants,
 *** structures, and macros generated from the original header, and thus,
 *** contains no copyrightable information.
 ****************************************************************************
 ****************************************************************************/
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128
struct as_io_context {
	void (*dtor)(struct as_io_context *aic);
	void (*exit)(struct as_io_context *aic);

	atomic_t nr_dispatched;

	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	sector_t last_request_pos;

struct cfq_io_context {
	struct rb_node rb_node;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;
	unsigned long last_queue;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;

	struct list_head queue_list;

	void (*dtor)(struct io_context *);
	void (*exit)(struct io_context *);

	struct task_struct *task;

	int (*set_ioprio)(struct io_context *, unsigned int);

	unsigned long last_waited;
	int nr_batch_requests;

	struct as_io_context *aic;
	struct rb_root cic_root;

struct io_context *current_io_context(gfp_t gfp_flags);
struct io_context *get_io_context(gfp_t gfp_flags);
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	wait_queue_head_t wait[2];

#define BLK_MAX_CDB 16

	struct list_head queuelist;
	struct list_head donelist;

	unsigned long nr_sectors;
	unsigned int current_nr_sectors;

	sector_t hard_sector;
	unsigned long hard_nr_sectors;
	unsigned int hard_cur_sectors;

	void *elevator_private;
	void *completion_data;

	struct gendisk *rq_disk;
	unsigned long start_time;

	unsigned short nr_phys_segments;
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	struct request_list *rl;

	struct completion *waiting;

	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;

	unsigned int timeout;

	rq_end_io_fn *end_io;

	__REQ_DRIVE_TASKFILE,

#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
#define REQ_CMD (1 << __REQ_CMD)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
#define REQ_PC (1 << __REQ_PC)
#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
#define REQ_SENSE (1 << __REQ_SENSE)
#define REQ_FAILED (1 << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_SPECIAL (1 << __REQ_SPECIAL)
#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
#define REQ_DRIVE_TASKFILE (1 << __REQ_DRIVE_TASKFILE)
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)

struct request_pm_state

#include <linux/elevator.h>
typedef int (merge_request_fn) (request_queue_t *, struct request *,
				struct bio *);
typedef int (merge_requests_fn) (request_queue_t *, struct request *,
				 struct request *);
typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (softirq_done_fn)(struct request *);
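/*
 * Example (illustrative sketch, not part of the generated header): a simple
 * driver provides a request_fn_proc roughly as below.  elv_next_request(),
 * end_request() and blk_init_queue() are the usual 2.6-era helpers declared
 * in the kernel's blkdev.h (outside this excerpt); the mydev_* names are
 * hypothetical.
 *
 *	static void mydev_request(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				end_request(rq, 0);	// skip non-fs requests
 *				continue;
 *			}
 *			mydev_transfer(rq);		// hypothetical data transfer
 *			end_request(rq, 1);		// completed successfully
 *		}
 *	}
 *
 *	// registration: q = blk_init_queue(mydev_request, &mydev_lock);
 */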
enum blk_queue_state {

struct blk_queue_tag {
	struct request **tag_index;
	unsigned long *tag_map;
	struct list_head busy_list;

	struct list_head queue_head;
	struct request *last_merge;
	elevator_t *elevator;

	struct request_list rq;

	request_fn_proc *request_fn;
	merge_request_fn *back_merge_fn;
	merge_request_fn *front_merge_fn;
	merge_requests_fn *merge_requests_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	merge_bvec_fn *merge_bvec_fn;
	activity_fn *activity_fn;
	issue_flush_fn *issue_flush_fn;
	prepare_flush_fn *prepare_flush_fn;
	softirq_done_fn *softirq_done_fn;

	struct request *boundary_rq;

	struct timer_list unplug_timer;
	unsigned long unplug_delay;
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	unsigned long bounce_pfn;

	unsigned long queue_flags;

	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	unsigned long nr_requests;
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int max_sectors;
	unsigned int max_hw_sectors;
	unsigned short max_phys_segments;
	unsigned short max_hw_segments;
	unsigned short hardsect_size;
	unsigned int max_segment_size;

	unsigned long seg_boundary_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;

	unsigned int nr_sorted;
	unsigned int in_flight;

	unsigned int sg_timeout;
	unsigned int sg_reserved_size;

	struct blk_trace *blk_trace;

	unsigned int ordered, next_ordered, ordseq;
	int orderr, ordcolor;
	struct request pre_flush_rq, bar_rq, post_flush_rq;
	struct request *orig_bar_rq;
	unsigned int bi_size;

	struct mutex sysfs_lock;

#define RQ_INACTIVE (-1)

#define QUEUE_FLAG_CLUSTER 0
#define QUEUE_FLAG_QUEUED 1
#define QUEUE_FLAG_STOPPED 2
#define QUEUE_FLAG_READFULL 3
#define QUEUE_FLAG_WRITEFULL 4
#define QUEUE_FLAG_DEAD 5
#define QUEUE_FLAG_REENTER 6
#define QUEUE_FLAG_PLUGGED 7
#define QUEUE_FLAG_ELVSWITCH 8

	QUEUE_ORDERED_NONE = 0x00,
	QUEUE_ORDERED_DRAIN = 0x01,
	QUEUE_ORDERED_TAG = 0x02,

	QUEUE_ORDERED_PREFLUSH = 0x10,
	QUEUE_ORDERED_POSTFLUSH = 0x20,
	QUEUE_ORDERED_FUA = 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	QUEUE_ORDSEQ_STARTED = 0x01,
	QUEUE_ORDSEQ_DRAIN = 0x02,
	QUEUE_ORDSEQ_PREFLUSH = 0x04,
	QUEUE_ORDSEQ_BAR = 0x08,
	QUEUE_ORDSEQ_POSTFLUSH = 0x10,
	QUEUE_ORDSEQ_DONE = 0x20,
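/*
 * Example (sketch): a driver whose device has a volatile write cache and a
 * cache-flush command would typically advertise one of the modes above via
 * the 2.6-era blk_queue_ordered() helper (declared in the kernel's blkdev.h,
 * outside this excerpt); mydev_prepare_flush is a hypothetical
 * prepare_flush_fn.
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydev_prepare_flush);
 *
 * A device that honours FUA writes could report QUEUE_ORDERED_DRAIN_FUA and
 * skip the post-flush.
 */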
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)

#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
#define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->flags & REQ_STARTED)

#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME)
#define blk_pm_request(rq) ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))

#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)

#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq) ((rq)->flags & 1)

#define RQ_NOMERGE_FLAGS (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) (!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
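/*
 * Example (sketch): how a driver typically classifies a request with the
 * predicates above before queueing it to hardware.  WRITE comes from
 * <linux/fs.h>; the mydev_* names are hypothetical.
 *
 *	if (blk_fs_request(rq)) {
 *		if (rq_data_dir(rq) == WRITE && blk_barrier_rq(rq))
 *			mydev_flush_cache();	// ordered write: flush around it
 *		mydev_queue_rw(rq);
 *	} else if (blk_pc_request(rq)) {
 *		mydev_queue_packet_cmd(rq);	// SCSI CDB carried in rq->cmd
 *	}
 */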
#define blk_queue_headactive(q, head_active)

#define BLKPREP_KILL 1
#define BLKPREP_DEFER 2

#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY ((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
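/*
 * Example (sketch): the thresholds above are intended as arguments to the
 * 2.6-era blk_queue_bounce_limit() helper (declared in the kernel's
 * blkdev.h, outside this excerpt).  A controller that can only DMA into
 * ISA memory would request bouncing with:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 * while a fully 64-bit capable controller would pass BLK_BOUNCE_ANY.
 */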
#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
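/*
 * Example (sketch): walking every bio of a request and every segment of
 * each bio, using rq_for_each_bio() together with bio_for_each_segment()
 * from <linux/bio.h>, to add up the request's total payload in bytes.
 *
 *	struct bio *bio;
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	rq_for_each_bio(bio, rq)
 *		bio_for_each_segment(bvec, bio, i)
 *			bytes += bvec->bv_len;
 */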
#define end_io_error(uptodate) (unlikely((uptodate) <= 0))

#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED)
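/*
 * Example (sketch): a TCQ-capable driver enables tagging and tags each
 * request before issuing it, using the 2.6-era blk_queue_init_tags() and
 * blk_queue_start_tag() helpers (declared in the kernel's blkdev.h, outside
 * this excerpt); the depth of 64 is illustrative only.
 *
 *	blk_queue_init_tags(q, 64, NULL);	// at initialisation time
 *	...
 *	if (blk_queue_start_tag(q, rq))		// non-zero: no free tag, retry later
 *		return;
 *	// blk_rq_tagged(rq) is now true and the request carries its tag
 */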
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE 65536
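/*
 * Example (sketch): these constants are the usual arguments to the 2.6-era
 * blk_queue_max_* helpers (declared in the kernel's blkdev.h, outside this
 * excerpt) when a driver sizes its queue:
 *
 *	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 *	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 *	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 *	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 */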
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)

#define sector_div(n, b) ( { int _res; _res = (n) % (b); (n) /= (b); _res; } )
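/*
 * Example (sketch): sector_div() divides a sector count in place and yields
 * the remainder (the original kernel header switches to do_div() when
 * sector_t is 64 bits wide).  get_capacity() comes from <linux/genhd.h>.
 *
 *	sector_t nr = get_capacity(disk);	// e.g. 1000005 sectors
 *	unsigned int rem = sector_div(nr, 64);
 *	// nr  == 15625: capacity in whole 64-sector (32 KiB) chunks
 *	// rem == 5:     sectors left over
 */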
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
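/*
 * Example: a block driver declares an alias so that userspace can load it
 * by device number; a driver owning all of major 8 (SCSI disks, per
 * <linux/major.h>) would write
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(8);
 *
 * which expands to MODULE_ALIAS("block-major-8-*").
 */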