Diffstat (limited to 'include/trace/events/block.h')
-rw-r--r--  include/trace/events/block.h  311
1 file changed, 114 insertions, 197 deletions
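
Caller-side note (not part of this diff): since the request_queue argument is dropped from the tracepoint prototypes, call sites pass only the request (or bio); the queue remains reachable through rq->q. A minimal sketch of the change at a call site, where example_issue is a hypothetical helper used only for illustration:

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Hypothetical caller, illustrating the new single-argument prototype. */
static void example_issue(struct request *rq)
{
	/* before this change: trace_block_rq_issue(rq->q, rq); */
	trace_block_rq_issue(rq);
}
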
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 81b43f5bdf23..7f4dfbdf12a6 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -64,7 +64,6 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
/**
* block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
@@ -73,9 +72,9 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
*/
TRACE_EVENT(block_rq_requeue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -86,11 +85,11 @@ TRACE_EVENT(block_rq_requeue,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
@@ -101,21 +100,9 @@ TRACE_EVENT(block_rq_requeue,
__entry->nr_sector, 0)
);
-/**
- * block_rq_complete - block IO operation completed by device driver
- * @rq: block operations request
- * @error: status code
- * @nr_bytes: number of completed bytes
- *
- * The block_rq_complete tracepoint event indicates that some portion
- * of operation request has been completed by the device driver. If
- * the @rq->bio is %NULL, then there is absolutely no additional work to
- * do for the request. If @rq->bio is non-NULL then there is
- * additional work required to complete the request.
- */
-TRACE_EVENT(block_rq_complete,
+DECLARE_EVENT_CLASS(block_rq_completion,
- TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
TP_ARGS(rq, error, nr_bytes),
@@ -123,18 +110,18 @@ TRACE_EVENT(block_rq_complete,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, error )
+ __field( int , error )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
- __entry->error = error;
+ __entry->error = blk_status_to_errno(error);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
@@ -145,11 +132,46 @@ TRACE_EVENT(block_rq_complete,
__entry->nr_sector, __entry->error)
);
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+ * the @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_complete,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
+/**
+ * block_rq_error - block IO operation error reported by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_error tracepoint event indicates that some portion
+ * of operation request has failed as reported by the device driver.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_error,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
DECLARE_EVENT_CLASS(block_rq,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -162,12 +184,12 @@ DECLARE_EVENT_CLASS(block_rq,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -181,7 +203,6 @@ DECLARE_EVENT_CLASS(block_rq,
/**
* block_rq_insert - insert block operation request into queue
- * @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
@@ -191,79 +212,52 @@ DECLARE_EVENT_CLASS(block_rq,
*/
DEFINE_EVENT(block_rq, block_rq_insert,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
* block_rq_issue - issue pending block IO request operation to device driver
- * @q: queue holding operation
- * @rq: block IO operation operation request
+ * @rq: block IO operation request
*
* Called when block operation request @rq from queue @q is sent to a
* device driver for processing.
*/
DEFINE_EVENT(block_rq, block_rq_issue,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request *rq),
- TP_ARGS(q, rq)
+ TP_ARGS(rq)
);
/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @q: queue holding the block operation
- * @bio: block operation
+ * block_rq_merge - merge request with another one in the elevator
+ * @rq: block IO operation request
*
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
+ * Called when block operation request @rq from queue @q is merged to another
+ * request queued in the elevator.
*/
-TRACE_EVENT(block_bio_bounce,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
+DEFINE_EVENT(block_rq, block_rq_merge,
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
+ TP_PROTO(struct request *rq),
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ TP_ARGS(rq)
);
/**
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
- * @error: io error value
*
* This tracepoint indicates there is no further work to do on this
* block IO operation @bio.
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+ TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio, error),
+ TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -277,8 +271,8 @@ TRACE_EVENT(block_bio_complete,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- __entry->error = error;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ __entry->error = blk_status_to_errno(bio->bi_status);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u [%d]",
@@ -287,11 +281,11 @@ TRACE_EVENT(block_bio_complete,
__entry->nr_sector, __entry->error)
);
-DECLARE_EVENT_CLASS(block_bio_merge,
+DECLARE_EVENT_CLASS(block_bio,
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
+ TP_PROTO(struct bio *bio),
- TP_ARGS(q, rq, bio),
+ TP_ARGS(bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -305,7 +299,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -316,133 +310,62 @@ DECLARE_EVENT_CLASS(block_bio_merge,
);
/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
+DEFINE_EVENT(block_bio, block_bio_bounce,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+/**
* block_bio_backmerge - merging block operation to the end of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block request @bio to the end of an existing block request
- * in queue @q.
+ * Merging block request @bio to the end of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_backmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
- * @q: queue holding operation
- * @rq: request bio is being merged into
* @bio: new block operation to merge
*
- * Merging block IO operation @bio to the beginning of an existing block
- * operation in queue @q.
+ * Merging block IO operation @bio to the beginning of an existing block request.
*/
-DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
-
- TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
-
- TP_ARGS(q, rq, bio)
+DEFINE_EVENT(block_bio, block_bio_frontmerge,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_bio_queue - putting new block IO operation in queue
- * @q: queue holding operation
* @bio: new block operation
*
* About to place the block IO operation @bio into queue @q.
*/
-TRACE_EVENT(block_bio_queue,
-
- TP_PROTO(struct request_queue *q, struct bio *bio),
-
- TP_ARGS(q, bio),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio_dev(bio);
- __entry->sector = bio->bi_iter.bi_sector;
- __entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
-);
-
-DECLARE_EVENT_CLASS(block_get_rq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( sector_t, sector )
- __field( unsigned int, nr_sector )
- __array( char, rwbs, RWBS_LEN )
- __array( char, comm, TASK_COMM_LEN )
- ),
-
- TP_fast_assign(
- __entry->dev = bio ? bio_dev(bio) : 0;
- __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
- __entry->nr_sector = bio ? bio_sectors(bio) : 0;
- blk_fill_rwbs(__entry->rwbs,
- bio ? bio->bi_opf : 0, __entry->nr_sector);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- ),
-
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+DEFINE_EVENT(block_bio, block_bio_queue,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
* block_getrq - get a free request entry in queue for block IO operations
- * @q: queue for operations
* @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
*
- * A request struct for queue @q has been allocated to handle the
- * block IO operation @bio.
+ * A request struct has been allocated to handle the block IO operation @bio.
*/
-DEFINE_EVENT(block_get_rq, block_getrq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
-);
-
-/**
- * block_sleeprq - waiting to get a free request entry in queue for block IO operation
- * @q: queue for operation
- * @bio: pending block IO operation (can be %NULL)
- * @rw: low bit indicates a read (%0) or a write (%1)
- *
- * In the case where a request struct cannot be provided for queue @q
- * the process needs to wait for an request struct to become
- * available. This tracepoint event is generated each time the
- * process goes to sleep waiting for request struct become available.
- */
-DEFINE_EVENT(block_get_rq, block_sleeprq,
-
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-
- TP_ARGS(q, bio, rw)
+DEFINE_EVENT(block_bio, block_getrq,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
);
/**
@@ -507,21 +430,19 @@ DEFINE_EVENT(block_unplug, block_unplug,
/**
* block_split - split a single bio struct into two bio structs
- * @q: queue containing the bio
* @bio: block operation being split
* @new_sector: The starting sector for the new bio
*
- * The bio request @bio in request queue @q needs to be split into two
- * bio requests. The newly created @bio request starts at
- * @new_sector. This split may be required due to hardware limitation
- * such as operation crossing device boundaries in a RAID system.
+ * The bio request @bio needs to be split into two bio requests. The newly
+ * created @bio request starts at @new_sector. This split may be required due to
+ * hardware limitations such as operation crossing device boundaries in a RAID
+ * system.
*/
TRACE_EVENT(block_split,
- TP_PROTO(struct request_queue *q, struct bio *bio,
- unsigned int new_sector),
+ TP_PROTO(struct bio *bio, unsigned int new_sector),
- TP_ARGS(q, bio, new_sector),
+ TP_ARGS(bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -535,7 +456,7 @@ TRACE_EVENT(block_split,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -548,9 +469,8 @@ TRACE_EVENT(block_split,
/**
* block_bio_remap - map request for a logical device to the raw device
- * @q: queue holding the operation
* @bio: revised operation
- * @dev: device for the operation
+ * @dev: original device for the operation
* @from: original sector for the operation
*
* An operation for a logical device has been mapped to the
@@ -558,10 +478,9 @@ TRACE_EVENT(block_split,
*/
TRACE_EVENT(block_bio_remap,
- TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t from),
+ TP_PROTO(struct bio *bio, dev_t dev, sector_t from),
- TP_ARGS(q, bio, dev, from),
+ TP_ARGS(bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -578,7 +497,7 @@ TRACE_EVENT(block_bio_remap,
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -591,7 +510,6 @@ TRACE_EVENT(block_bio_remap,
/**
* block_rq_remap - map request for a block operation request
- * @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
@@ -602,10 +520,9 @@ TRACE_EVENT(block_bio_remap,
*/
TRACE_EVENT(block_rq_remap,
- TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
- sector_t from),
+ TP_PROTO(struct request *rq, dev_t dev, sector_t from),
- TP_ARGS(q, rq, dev, from),
+ TP_ARGS(rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -618,13 +535,13 @@ TRACE_EVENT(block_rq_remap,
),
TP_fast_assign(
- __entry->dev = disk_devt(rq->rq_disk);
+ __entry->dev = disk_devt(rq->q->disk);
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
__entry->old_sector = from;
__entry->nr_bios = blk_rq_count_bios(rq);
- blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",