Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/backing-dev.h  12
-rw-r--r--  include/linux/blkdev.h       52
2 files changed, 41 insertions(+), 23 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4..0ec2c594868 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
*/
enum bdi_state {
BDI_pdflush, /* A pdflush thread is working this device */
- BDI_write_congested, /* The write queue is getting full */
- BDI_read_congested, /* The read queue is getting full */
+ BDI_async_congested, /* The async (write) queue is getting full */
+ BDI_sync_congested, /* The sync queue is getting full */
BDI_unused, /* Available bits start here */
};
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
- return bdi_congested(bdi, 1 << BDI_read_congested);
+ return bdi_congested(bdi, 1 << BDI_sync_congested);
}
static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
- return bdi_congested(bdi, 1 << BDI_write_congested);
+ return bdi_congested(bdi, 1 << BDI_async_congested);
}
static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
- return bdi_congested(bdi, (1 << BDI_read_congested)|
- (1 << BDI_write_congested));
+ return bdi_congested(bdi, (1 << BDI_sync_congested) |
+ (1 << BDI_async_congested));
}
void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
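For context, the congestion helpers above are pure bitmask tests against bdi->state; the rename only changes which bit each helper reads. A minimal userspace sketch of that logic (mock types, not kernel code; bdi_congested() is simplified to the bare mask test):

#include <stdio.h>

enum bdi_state {
	BDI_pdflush,		/* a pdflush thread is working this device */
	BDI_async_congested,	/* the async (write) queue is getting full */
	BDI_sync_congested,	/* the sync queue is getting full */
	BDI_unused,		/* available bits start here */
};

struct backing_dev_info {
	unsigned long state;	/* bitmask of bdi_state bits */
};

static int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	return bdi->state & bdi_bits;
}

int main(void)
{
	struct backing_dev_info bdi = { .state = 1 << BDI_sync_congested };

	/* reads and sync writes should back off; async writeback may continue */
	printf("sync congested:  %d\n",
	       !!bdi_congested(&bdi, 1 << BDI_sync_congested));
	printf("async congested: %d\n",
	       !!bdi_congested(&bdi, 1 << BDI_async_congested));
	return 0;
}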
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc84..67dae3bd881 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
typedef void (rq_end_io_fn)(struct request *, int);
struct request_list {
+ /*
+ * count[], starved[], and wait[] are indexed by
+ * BLK_RW_SYNC/BLK_RW_ASYNC
+ */
int count[2];
int starved[2];
int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
REQ_TYPE_ATA_PC,
};
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
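The new BLK_RW_* constants replace raw READ/WRITE indexing of the per-queue request accounting. A hedged sketch of how an allocator might bump those counters; rl_account_alloc() is a hypothetical helper, and the freelist and starvation details are elided:

#include <stdio.h>
#include <stdbool.h>

enum {
	BLK_RW_ASYNC = 0,
	BLK_RW_SYNC = 1,
};

struct request_list {
	/* indexed by BLK_RW_SYNC / BLK_RW_ASYNC */
	int count[2];
	int starved[2];
};

/* hypothetical helper: account one allocated request */
static void rl_account_alloc(struct request_list *rl, bool is_sync)
{
	rl->count[is_sync]++;	/* bool promotes to 0 or 1 */
}

int main(void)
{
	struct request_list rl = { { 0, 0 }, { 0, 0 } };

	rl_account_alloc(&rl, true);	/* a read or a sync write */
	rl_account_alloc(&rl, false);	/* an async write */

	printf("sync=%d async=%d\n",
	       rl.count[BLK_RW_SYNC], rl.count[BLK_RW_ASYNC]);
	return 0;
}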
@@ -103,7 +112,7 @@ enum rq_flag_bits {
__REQ_QUIET, /* don't worry about errors */
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
- __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
+ __REQ_RW_SYNC, /* request is sync (sync write or read) */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
@@ -438,8 +447,8 @@ struct request_queue
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL 3 /* sync request queue is full */
+#define QUEUE_FLAG_ASYNCFULL 4 /* async request queue is full */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -611,32 +620,41 @@ enum {
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
/*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync if it is either a read or a sync write
*/
-#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+ return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return rw_is_sync(rq->cmd_flags);
+}
+
#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
- return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+ if (sync)
+ return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+ return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- queue_flag_set(QUEUE_FLAG_READFULL, q);
+ if (sync)
+ queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
else
- queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+ queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- queue_flag_clear(QUEUE_FLAG_READFULL, q);
+ if (sync)
+ queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
else
- queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+ queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}
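To make the new classification concrete, here is a standalone sketch of the rw_is_sync() truth table: reads are always sync, and writes are sync only when explicitly flagged. The REQ_RW and REQ_RW_SYNC bit values below are stand-ins for illustration; the real values come from enum rq_flag_bits in this header:

#include <stdio.h>
#include <stdbool.h>

/* stand-in flag bits; the real ones come from enum rq_flag_bits */
#define REQ_RW		(1 << 0)	/* not set == READ, set == WRITE */
#define REQ_RW_SYNC	(1 << 1)	/* request is marked sync */

static bool rw_is_sync(unsigned int rw_flags)
{
	/* reads are always sync; writes only if explicitly marked */
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

int main(void)
{
	printf("READ              -> %d\n", rw_is_sync(0));
	printf("WRITE             -> %d\n", rw_is_sync(REQ_RW));
	printf("WRITE|REQ_RW_SYNC -> %d\n", rw_is_sync(REQ_RW | REQ_RW_SYNC));
	return 0;
}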