-rw-r--r--  block/ll_rw_blk.c       24
-rw-r--r--  drivers/ide/ide-cd.c    10
-rw-r--r--  include/linux/blkdev.h   1
3 files changed, 0 insertions, 35 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c182c9f4b2c4..c44d6fe9f6ce 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2734,30 +2734,6 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
return 0;
}
-/**
- * blk_attempt_remerge - attempt to remerge active head with next request
- * @q: The &request_queue_t belonging to the device
- * @rq: The head request (usually)
- *
- * Description:
- * For head-active devices, the queue can easily be unplugged so quickly
- * that proper merging is not done on the front request. This may hurt
- * performance greatly for some devices. The block layer cannot safely
- * do merging on that first request for these queues, but the driver can
- * call this function and make it happen any way. Only the driver knows
- * when it is safe to do so.
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- attempt_back_merge(q, rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-EXPORT_SYMBOL(blk_attempt_remerge);
-
static void init_request_from_bio(struct request *req, struct bio *bio)
{
req->flags |= REQ_CMD;
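
The helper deleted above was a thin wrapper: it took the device's queue lock with
interrupts disabled and retried a back merge on the given request, nothing more.
Rendered outside diff notation for readability (this is the removed code itself,
with comments added here, not a new API), it was:

void blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
	unsigned long flags;

	/* Merging manipulates the request queue, so it must run under
	 * q->queue_lock with local interrupts disabled. */
	spin_lock_irqsave(q->queue_lock, flags);
	/* Try to fold the following queued request into rq. */
	attempt_back_merge(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

With the export gone, drivers lose this facility outright, since attempt_back_merge()
itself, like attempt_front_merge() in the hunk context above, is internal to
block/ll_rw_blk.c.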
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index d31117eb95aa..e4d55ad32d2f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1332,8 +1332,6 @@ static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
if (cdrom_read_from_buffer(drive))
return ide_stopped;
- blk_attempt_remerge(drive->queue, rq);
-
/* Clear the local sector buffer. */
info->nsectors_buffered = 0;
@@ -1874,14 +1872,6 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
return ide_stopped;
}
- /*
- * for dvd-ram and such media, it's a really big deal to get
- * big writes all the time. so scour the queue and attempt to
- * remerge requests, often the plugging will not have had time
- * to do this properly
- */
- blk_attempt_remerge(drive->queue, rq);
-
info->nsectors_buffered = 0;
/* use dma, if possible. we don't need to check more, since we
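
The comment removed here explained why the driver bothered: media such as DVD-RAM pay
heavily for small writes, and a successful back merge turns two sector-contiguous
requests into one larger transfer. As a rough, standalone illustration of the
contiguity rule involved (a deliberate simplification, not the block layer's actual
merge code; toy_request and the toy_* helpers are hypothetical names):

/* Simplified sketch: two queued requests can be back-merged only when
 * the second starts exactly where the first ends, i.e. together they
 * describe one contiguous run of sectors. */
struct toy_request {
	unsigned long sector;		/* first sector of the request */
	unsigned long nr_sectors;	/* length in sectors */
};

static int toy_back_mergeable(const struct toy_request *rq,
			      const struct toy_request *next)
{
	return rq->sector + rq->nr_sectors == next->sector;
}

/* On a merge the first request simply grows to cover both ranges,
 * which is what lets the drive see one large write instead of two. */
static void toy_back_merge(struct toy_request *rq,
			   const struct toy_request *next)
{
	rq->nr_sectors += next->nr_sectors;
}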
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb0985377421..96b233991685 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -592,7 +592,6 @@ extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
-extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);