/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT      (admin_timeout * HZ)

#define NVME_DEFAULT_KATO       5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT    12
#define NVME_CTRL_PAGE_SIZE     (1 << NVME_CTRL_PAGE_SHIFT)
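
/*
 * Illustrative sketch, not upstream code: transfer sizes are accounted
 * in NVME_CTRL_PAGE_SIZE units, e.g. when sizing PRP lists; the helper
 * name below is made up for the example.
 */
#if 0
static inline unsigned int nvme_ctrl_npages_example(size_t len)
{
        /* an 8 KiB transfer spans 8192 / 4096 = 2 controller pages */
        return DIV_ROUND_UP(len, NVME_CTRL_PAGE_SIZE);
}
#endif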

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
        /*
         * Prefers I/O aligned to a stripe size specified in a vendor
         * specific Identify field.
         */
        NVME_QUIRK_STRIPE_SIZE                  = (1 << 0),

        /*
         * The controller doesn't handle Identify values other than 0 or 1
         * correctly.
         */
        NVME_QUIRK_IDENTIFY_CNS                 = (1 << 1),

        /*
         * The controller deterministically returns zeroes on reads to
         * logical blocks that deallocate was called on.
         */
        NVME_QUIRK_DEALLOCATE_ZEROES            = (1 << 2),

        /*
         * The controller needs a delay before it starts checking the device
         * readiness, which is done by reading the NVME_CSTS_RDY bit.
         */
        NVME_QUIRK_DELAY_BEFORE_CHK_RDY         = (1 << 3),

        /*
         * APST should not be used.
         */
        NVME_QUIRK_NO_APST                      = (1 << 4),

        /*
         * The deepest sleep state should not be used.
         */
        NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),

        /*
         * Set MEDIUM priority on SQ creation
         */
        NVME_QUIRK_MEDIUM_PRIO_SQ               = (1 << 7),

        /*
         * Ignore device provided subnqn.
         */
        NVME_QUIRK_IGNORE_DEV_SUBNQN            = (1 << 8),

        /*
         * Broken Write Zeroes.
         */
        NVME_QUIRK_DISABLE_WRITE_ZEROES         = (1 << 9),

        /*
         * Force simple suspend/resume path.
         */
        NVME_QUIRK_SIMPLE_SUSPEND               = (1 << 10),

        /*
         * Use only one interrupt vector for all queues
         */
        NVME_QUIRK_SINGLE_VECTOR                = (1 << 11),

        /*
         * Use non-standard 128-byte SQEs.
         */
        NVME_QUIRK_128_BYTES_SQES               = (1 << 12),

        /*
         * Prevent tag overlap between queues
         */
        NVME_QUIRK_SHARED_TAGS                  = (1 << 13),

        /*
         * Don't change the value of the temperature threshold feature
         */
        NVME_QUIRK_NO_TEMP_THRESH_CHANGE        = (1 << 14),

        /*
         * The controller doesn't handle the Identify Namespace
         * Identification Descriptor list subcommand despite claiming
         * NVMe 1.3 compliance.
         */
        NVME_QUIRK_NO_NS_DESC_LIST              = (1 << 15),

        /*
         * The controller does not properly handle DMA addresses over
         * 48 bits.
         */
        NVME_QUIRK_DMA_ADDRESS_BITS_48          = (1 << 16),

        /*
         * The controller requires the command_id value be limited, so skip
         * encoding the generation sequence number.
         */
        NVME_QUIRK_SKIP_CID_GEN                 = (1 << 17),
};
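
/*
 * Illustrative sketch, not upstream code: quirks are typically attached
 * to a device via the driver_data of a PCI device ID table entry in the
 * PCI transport driver, e.g. for the device named in
 * NVME_QUIRK_DELAY_AMOUNT below:
 */
#if 0
static const struct pci_device_id nvme_id_table_example[] = {
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { 0, }
};
#endif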

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
        struct nvme_command     *cmd;
        union nvme_result       result;
        u8                      genctr;
        u8                      retries;
        u8                      flags;
        u16                     status;
        struct nvme_ctrl        *ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH          REQ_DRV

enum {
        NVME_REQ_CANCELLED              = (1 << 0),
        NVME_REQ_USERCMD                = (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
        return blk_mq_rq_to_pdu(req);
}

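/* returns 0 for the admin queue (no queuedata), a 1-based id for I/O queues */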
static inline u16 nvme_req_qid(struct request *req)
{
        if (!req->q->queuedata)
                return 0;

        return req->mq_hctx->queue_num + 1;
}

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT         2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:              New controller just allocated, initial state
 * @NVME_CTRL_LIVE:             Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:        Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:       Controller is disconnected, now connecting the
 *                              transport
 * @NVME_CTRL_DELETING:         Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:    Controller is deleting and I/O is not
 *                              disabled/failed immediately. This state comes
 *                              after all async event processing has taken
 *                              place and before namespace removal and
 *                              controller deletion progress
 * @NVME_CTRL_DEAD:             Controller is non-present/unresponsive during
 *                              shutdown or removal. In this case we forcibly
 *                              kill all inflight I/O as they have no chance to
 *                              complete
 */
enum nvme_ctrl_state {
        NVME_CTRL_NEW,
        NVME_CTRL_LIVE,
        NVME_CTRL_RESETTING,
        NVME_CTRL_CONNECTING,
        NVME_CTRL_DELETING,
        NVME_CTRL_DELETING_NOIO,
        NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
        struct fault_attr attr;
        struct dentry *parent;
        bool dont_retry;        /* DNR, do not retry */
        u16 status;             /* status code */
#endif
};

struct nvme_ctrl {
        bool comp_seen;
        enum nvme_ctrl_state state;
        bool identified;
        spinlock_t lock;
        struct mutex scan_lock;
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct request_queue *connect_q;
        struct request_queue *fabrics_q;
        struct device *dev;
        int instance;
        int numa_node;
        struct blk_mq_tag_set *tagset;
        struct blk_mq_tag_set *admin_tagset;
        struct list_head namespaces;
        struct rw_semaphore namespaces_rwsem;
        struct device ctrl_device;
        struct device *device;  /* char device */
#ifdef CONFIG_NVME_HWMON
        struct device *hwmon_device;
#endif
        struct cdev cdev;
        struct work_struct reset_work;
        struct work_struct delete_work;
        wait_queue_head_t state_wq;

        struct nvme_subsystem *subsys;
        struct list_head subsys_entry;

        struct opal_dev *opal_dev;

        char name[12];
        u16 cntlid;

        u32 ctrl_config;
        u16 mtfa;
        u32 queue_count;

        u64 cap;
        u32 max_hw_sectors;
        u32 max_segments;
        u32 max_integrity_segments;
        u32 max_discard_sectors;
        u32 max_discard_segments;
        u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
        u32 max_zone_append;
#endif
        u16 crdt[3];
        u16 oncs;
        u16 oacs;
        u16 nr_streams;
        u16 sqsize;
        u32 max_namespaces;
        atomic_t abort_limit;
        u8 vwc;
        u32 vs;
        u32 sgls;
        u16 kas;
        u8 npss;
        u8 apsta;
        u16 wctemp;
        u16 cctemp;
        u32 oaes;
        u32 aen_result;
        u32 ctratt;
        unsigned int shutdown_timeout;
        unsigned int kato;
        bool subsystem;
        unsigned long quirks;
        struct nvme_id_power_state psd[32];
        struct nvme_effects_log *effects;
        struct xarray cels;
        struct work_struct scan_work;
        struct work_struct async_event_work;
        struct delayed_work ka_work;
        struct delayed_work failfast_work;
        struct nvme_command ka_cmd;
        struct work_struct fw_act_work;
        unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
        /* asymmetric namespace access: */
        u8 anacap;
        u8 anatt;
        u32 anagrpmax;
        u32 nanagrpid;
        struct mutex ana_lock;
        struct nvme_ana_rsp_hdr *ana_log_buf;
        size_t ana_log_size;
        struct timer_list anatt_timer;
        struct work_struct ana_work;
#endif

        /* Power saving configuration */
        u64 ps_max_latency_us;
        bool apst_enabled;

        /* PCIe only: */
        u32 hmpre;
        u32 hmmin;
        u32 hmminds;
        u16 hmmaxd;

        /* Fabrics only */
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
        u16 maxcmd;
        int nr_reconnects;
        unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED      0
#define NVME_CTRL_ADMIN_Q_STOPPED       1
        struct nvmf_ctrl_options *opts;

        struct page *discard_page;
        unsigned long discard_page_busy;

        struct nvme_fault_inject fault_inject;

        enum nvme_ctrl_type cntrltype;
        enum nvme_dctype dctype;
};

enum nvme_iopolicy {
        NVME_IOPOLICY_NUMA,
        NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
        int                     instance;
        struct device           dev;
        /*
         * Because we unregister the device on the last put we need
         * a separate refcount.
         */
        struct kref             ref;
        struct list_head        entry;
        struct mutex            lock;
        struct list_head        ctrls;
        struct list_head        nsheads;
        char                    subnqn[NVMF_NQN_SIZE];
        char                    serial[20];
        char                    model[40];
        char                    firmware_rev[8];
        u8                      cmic;
        enum nvme_subsys_type   subtype;
        u16                     vendor_id;
        u16                     awupf;  /* 0's based awupf value. */
        struct ida              ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
        enum nvme_iopolicy      iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
        u8      eui64[8];
        u8      nguid[16];
        uuid_t  uuid;
        u8      csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
        struct list_head        list;
        struct srcu_struct      srcu;
        struct nvme_subsystem   *subsys;
        unsigned                ns_id;
        struct nvme_ns_ids      ids;
        struct list_head        entry;
        struct kref             ref;
        bool                    shared;
        int                     instance;
        struct nvme_effects_log *effects;

        struct cdev             cdev;
        struct device           cdev_device;

        struct gendisk          *disk;
#ifdef CONFIG_NVME_MULTIPATH
        struct bio_list         requeue_list;
        spinlock_t              requeue_lock;
        struct work_struct      requeue_work;
        struct mutex            lock;
        unsigned long           flags;
#define NVME_NSHEAD_DISK_LIVE   0
        struct nvme_ns __rcu    *current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
        return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
        NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
        NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
        struct list_head list;

        struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
        enum nvme_ana_state ana_state;
        u32 ana_grpid;
#endif
        struct list_head siblings;
        struct kref kref;
        struct nvme_ns_head *head;

        int lba_shift;
        u16 ms;
        u16 sgs;
        u32 sws;
        u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
        u64 zsze;
#endif
        unsigned long features;
        unsigned long flags;
#define NVME_NS_REMOVING        0
#define NVME_NS_DEAD            1
#define NVME_NS_ANA_PENDING     2
#define NVME_NS_FORCE_RO        3
#define NVME_NS_READY           4
#define NVME_NS_STOPPED         5

        struct cdev             cdev;
        struct device           cdev_device;

        struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
        return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

struct nvme_ctrl_ops {
        const char *name;
        struct module *module;
        unsigned int flags;
#define NVME_F_FABRICS                  (1 << 0)
#define NVME_F_METADATA_SUPPORTED       (1 << 1)
#define NVME_F_PCI_P2PDMA               (1 << 2)
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl);
        void (*delete_ctrl)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)                   (gen & 0xf)
#define nvme_cid_install_genctr(gen)            (nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)               ((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)                  (cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
        return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
                u16 command_id)
{
        u8 genctr = nvme_genctr_from_cid(command_id);
        u16 tag = nvme_tag_from_cid(command_id);
        struct request *rq;

        rq = blk_mq_tag_to_rq(tags, tag);
        if (unlikely(!rq)) {
                pr_err("could not locate request for tag %#x\n",
                        tag);
                return NULL;
        }
        if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
                dev_err(nvme_req(rq)->ctrl->device,
                        "request %#x genctr mismatch (got %#x expected %#x)\n",
                        tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
                return NULL;
        }
        return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
                u16 command_id)
{
        return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}
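
/*
 * Illustrative sketch, not upstream code: the command_id layout above
 * round-trips, e.g. genctr 0x5 with tag 0x2a encodes to 0x502a and
 * decodes back to the same pair.
 */
#if 0
static inline void nvme_cid_roundtrip_example(void)
{
        u16 cid = nvme_cid_install_genctr(0x5) | 0x2a;  /* 0x502a */

        WARN_ON(nvme_genctr_from_cid(cid) != 0x5);
        WARN_ON(nvme_tag_from_cid(cid) != 0x2a);
}
#endif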

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
                            const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
                                          const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
        if (!ctrl->subsystem)
                return -ENOTTY;
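        /* the NSSR "reset" magic 0x4E564D65 is the ASCII string "NVMe" */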
        return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
        return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
        return lba << (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
        return (len >> 2) - 1;
}
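
/*
 * Illustrative sketch, not upstream code: assuming a namespace formatted
 * with 4096-byte logical blocks (lba_shift == 12), the helpers above
 * work out as follows.
 */
#if 0
static inline void nvme_unit_conversion_example(struct nvme_ns *ns)
{
        WARN_ON(nvme_sect_to_lba(ns, 8) != 1);          /* 8 >> (12 - 9) */
        WARN_ON(nvme_lba_to_sect(ns, 1) != 8);          /* 1 << (12 - 9) */
        WARN_ON(nvme_bytes_to_numd(4096) != 1023);      /* (4096 >> 2) - 1 */
}
#endif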

static inline bool nvme_is_ana_error(u16 status)
{
        switch (status & 0x7ff) {
        case NVME_SC_ANA_TRANSITION:
        case NVME_SC_ANA_INACCESSIBLE:
        case NVME_SC_ANA_PERSISTENT_LOSS:
                return true;
        default:
                return false;
        }
}

static inline bool nvme_is_path_error(u16 status)
{
        /* check for a status code type of 'path related status' */
        return (status & 0x700) == 0x300;
}
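
/*
 * Example: a (phase-bit-stripped) status of 0x370 has Status Code Type
 * 0x3, "Path Related Status", so nvme_is_path_error() is true for it.
 */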

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
                union nvme_result result)
{
        struct nvme_request *rq = nvme_req(req);
        struct nvme_ctrl *ctrl = rq->ctrl;

        if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
                rq->genctr++;

        rq->status = le16_to_cpu(status) >> 1;
        rq->result = result;
        /* inject error when permitted by fault injection framework */
        nvme_should_fail(req);
        if (unlikely(blk_should_fake_timeout(req->q)))
                return true;
        return blk_mq_complete_request_remote(req);
}
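
/*
 * Illustrative sketch, not upstream code: a transport's CQE handler
 * tries the fast completion first and falls back to its own completion
 * path when blk-mq did not complete the request; the fallback name is
 * borrowed from the PCIe driver for the example.
 */
#if 0
static void nvme_handle_cqe_example(struct request *req,
                struct nvme_completion *cqe)
{
        if (!nvme_try_complete_req(req, cqe->status, cqe->result))
                nvme_pci_complete_rq(req);
}
#endif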

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
        get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
        put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
        return !qid &&
                nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
                                                void (*fn)(struct request *rq))
{
        struct request *req;

        rq_list_for_each(&iob->req_list, req) {
                fn(req);
                nvme_complete_batch_req(req);
        }
        blk_mq_end_request_batch(iob);
}
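
/*
 * Illustrative sketch, not upstream code: a driver completes a polled
 * batch by passing its per-request teardown (the hypothetical
 * nvme_foo_unmap_rq() here) as the callback.
 */
#if 0
static void nvme_foo_complete_batch(struct io_comp_batch *iob)
{
        nvme_complete_batch(iob, nvme_foo_unmap_rq);
}
#endif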

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
                bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
        return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live)
{
        if (likely(ctrl->state == NVME_CTRL_LIVE))
                return true;
        if (ctrl->ops->flags & NVME_F_FABRICS &&
            ctrl->state == NVME_CTRL_DELETING)
                return queue_live;
        return __nvme_check_ready(ctrl, rq, queue_live);
}
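
/*
 * Illustrative sketch, not upstream code: transports call
 * nvme_check_ready() at the top of ->queue_rq() and fail the command
 * fast when the controller cannot accept it yet.
 */
#if 0
static blk_status_t nvme_foo_queue_rq_example(struct nvme_ctrl *ctrl,
                struct request *rq, bool queue_live)
{
        if (!nvme_check_ready(ctrl, rq, queue_live))
                return nvme_fail_nonready_command(ctrl, rq);
        /* ... map data and post the command to the transport ... */
        return BLK_STS_OK;
}
#endif
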
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
                blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
                      unsigned int dword11, void *buffer, size_t buflen,
                      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
                      unsigned int dword11, void *buffer, size_t buflen,
                      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
                void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
                const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
        return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
        struct nvme_ns *ns = req->q->queuedata;

        if (req->cmd_flags & REQ_NVME_MPATH)
                trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
        return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
                struct nvme_ns_head *head)
{
        return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
                struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
        return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
                struct nvme_id_ctrl *id)
{
        if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
                dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
        return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
                unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
                                       struct nvme_command *cmnd,
                                       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
                struct request *req, struct nvme_command *cmnd,
                enum nvme_zone_mgmt_action action)
{
        return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
        dev_warn(ns->ctrl->device,
                 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
        return -EPROTONOSUPPORT;
}
#endif

static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
{
        ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
        if (IS_ERR(ctrl->connect_q))
                return PTR_ERR(ctrl->connect_q);
        return 0;
}

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
        return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
        return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

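/* SGLS bits 1:0 (Identify Controller) advertise SGL support */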
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
        return ctrl->sgls & ((1 << 0) | (1 << 1));
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                         u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
        return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
        return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
        return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
        return "Admin Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */

#endif /* _NVME_H */