#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_drain_queue(struct request_queue *q, bool drain_all);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
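/*
 * Illustrative sketch, not part of this header: both the normal completion
 * path and the timeout handler are expected to try to claim the request
 * first, so that only the winner processes it.  A completion-side caller
 * (along the lines of blk_complete_request()) would do roughly:
 *
 *	if (!blk_mark_rq_complete(rq))
 *		__blk_complete_request(rq);	(we won; the EH timer backs off)
 *
 * The exact call site is an assumption here; only the claim-before-complete
 * pattern is what the REQ_ATOM_COMPLETE bit provides.
 */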
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is running but isn't queueable in the
		 * drive, so we can hold the queue until the flush request
		 * is finished.  Even if we didn't do this, the driver
		 * couldn't dispatch the next requests and would have to
		 * requeue them.  Holding the queue can also improve
		 * throughput.  For example, with requests flush1, write1,
		 * flush2: flush1 is dispatched, then the queue is held and
		 * write1 isn't inserted into the queue.  After flush1
		 * finishes, flush2 is dispatched.  Since the disk cache is
		 * already clean, flush2 finishes very soon, so flush2 is
		 * effectively folded into flush1.
		 * Since the queue is held, a flag is set to indicate that
		 * the queue should be restarted later.  Please see
		 * flush_end_io() for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dead(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q,
				     struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
	        (rq->cmd_flags & REQ_DISCARD));
}
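/*
 * Illustrative sketch, an assumption rather than anything defined in this
 * file: the accounting helpers in blk-core.c are expected to use
 * blk_do_io_stat() as a gate around the per-partition statistics updates,
 * along the lines of:
 *
 *	if (blk_do_io_stat(req)) {
 *		struct hd_struct *part = req->part;
 *		... bump the part_stat_* counters for this request ...
 *	}
 *
 * so requests failing any of conditions a)-c) above simply skip accounting.
 */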
/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
				int node);

/**
 * create_io_context - try to create task->io_context
 * @task: target task
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If @task->io_context is %NULL, allocate a new io_context and install it.
 * Returns the current @task->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects @task->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(struct task_struct *task,
						   gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!task->io_context))
		create_io_context_slowpath(task, gfp_mask, node);
	return task->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */
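/*
 * Illustrative sketch, an assumption about the caller rather than part of
 * this header: the bio submission path is expected to consult
 * blk_throtl_bio() and stop issuing a bio that blk-throttle has taken over,
 * e.g. somewhere in the generic_make_request() checks:
 *
 *	if (blk_throtl_bio(q, bio))
 *		return false;	(throttled, will be resubmitted later)
 *
 * The !CONFIG_BLK_DEV_THROTTLING stubs above keep such a call site
 * unchanged when throttling is compiled out.
 */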