Commit 2f9e825d authored by Linus Torvalds

Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
  block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
  xen-blkfront: fix missing out label
  blkdev: fix blkdev_issue_zeroout return value
  block: update request stacking methods to support discards
  block: fix missing export of blk_types.h
  writeback: fix bad _bh spinlock nesting
  drbd: revert "delay probes", feature is being re-implemented differently
  drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
  drbd: Disable delay probes for the upcomming release
  writeback: cleanup bdi_register
  writeback: add new tracepoints
  writeback: remove unnecessary init_timer call
  writeback: optimize periodic bdi thread wakeups
  writeback: prevent unnecessary bdi threads wakeups
  writeback: move bdi threads exiting logic to the forker thread
  writeback: restructure bdi forker loop a little
  writeback: move last_active to bdi
  writeback: do not remove bdi from bdi_list
  writeback: simplify bdi code a little
  writeback: do not lose wake-ups in bdi threads
  ...

Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and
drivers/scsi/scsi_error.c as per Jens.
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
#endif /* !(_ALPHA_SCATTERLIST_H) */
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffff)
#endif /* __ASM_AVR32_SCATTERLIST_H */
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffff)
#endif /* !(_BLACKFIN_SCATTERLIST_H) */
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x1fffffff)
#endif /* !(__ASM_CRIS_SCATTERLIST_H) */
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffffUL)
#endif /* !_ASM_SCATTERLIST_H */
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffff)
#endif /* !(_H8300_SCATTERLIST_H) */
@@ -2,15 +2,6 @@
#define _ASM_IA64_SCATTERLIST_H
#include <asm-generic/scatterlist.h>
/*
* It used to be that ISA_DMA_THRESHOLD had something to do with the
* DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
* from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
* tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
* address of a page is that is allocated with GFP_DMA. On IA-64,
* that's 4GB - 1.
*/
#define ISA_DMA_THRESHOLD 0xffffffff
#define ARCH_HAS_SG_CHAIN
#endif /* _ASM_IA64_SCATTERLIST_H */
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x1fffffff)
#endif /* _ASM_M32R_SCATTERLIST_H */
@@ -3,7 +3,4 @@
#include <asm-generic/scatterlist.h>
/* This is bogus and should go away. */
#define ISA_DMA_THRESHOLD (0x00ffffff)
#endif /* !(_M68K_SCATTERLIST_H) */
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
@@ -3,6 +3,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x00ffffffUL)
#endif /* __ASM_SCATTERLIST_H */
@@ -13,6 +13,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x00ffffff)
#endif /* _ASM_SCATTERLIST_H */
@@ -5,7 +5,6 @@
#include <asm/types.h>
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
#endif /* _ASM_PARISC_SCATTERLIST_H */
@@ -12,9 +12,6 @@
#include <asm/dma.h>
#include <asm-generic/scatterlist.h>
#ifdef __powerpc64__
#define ISA_DMA_THRESHOLD (~0UL)
#endif
#define ARCH_HAS_SG_CHAIN
#endif /* _ASM_POWERPC_SCATTERLIST_H */
#define ISA_DMA_THRESHOLD (~0UL)
#include <asm-generic/scatterlist.h>
#ifndef _ASM_SCORE_SCATTERLIST_H
#define _ASM_SCORE_SCATTERLIST_H
#define ISA_DMA_THRESHOLD (~0UL)
#include <asm-generic/scatterlist.h>
#endif /* _ASM_SCORE_SCATTERLIST_H */
#ifndef __ASM_SH_SCATTERLIST_H
#define __ASM_SH_SCATTERLIST_H
#define ISA_DMA_THRESHOLD phys_addr_mask()
#include <asm-generic/scatterlist.h>
#endif /* __ASM_SH_SCATTERLIST_H */
@@ -3,7 +3,6 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
#define ARCH_HAS_SG_CHAIN
#endif /* !(_SPARC_SCATTERLIST_H) */
@@ -33,6 +33,7 @@
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/smp_lock.h"
#include "linux/blkpg.h"
#include "linux/genhd.h"
#include "linux/spinlock.h"
@@ -1098,6 +1099,7 @@ static int ubd_open(struct block_device *bdev, fmode_t mode)
struct ubd *ubd_dev = disk->private_data;
int err = 0;
lock_kernel();
if(ubd_dev->count == 0){
err = ubd_open_dev(ubd_dev);
if(err){
@@ -1115,7 +1117,8 @@ static int ubd_open(struct block_device *bdev, fmode_t mode)
if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev);
err = -EROFS;
}*/
out:
out:
unlock_kernel();
return err;
}
@@ -1123,8 +1126,10 @@ static int ubd_release(struct gendisk *disk, fmode_t mode)
{
struct ubd *ubd_dev = disk->private_data;
lock_kernel();
if(--ubd_dev->count == 0)
ubd_close_dev(ubd_dev);
unlock_kernel();
return 0;
}
@@ -3,7 +3,6 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x00ffffff)
#define ARCH_HAS_SG_CHAIN
#endif /* _ASM_X86_SCATTERLIST_H */
@@ -13,6 +13,4 @@
#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
#endif /* _XTENSA_SCATTERLIST_H */
@@ -13,7 +13,6 @@
* blk_queue_ordered - does this queue support ordered writes
* @q: the request queue
* @ordered: one of QUEUE_ORDERED_*
* @prepare_flush_fn: rq setup helper for cache flush ordered writes
*
* Description:
* For journalled file systems, doing ordered writes on a commit
@@ -22,15 +21,8 @@
* feature should call this function and indicate so.
*
**/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_DO_POSTFLUSH))) {
printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
return -EINVAL;
}
if (ordered != QUEUE_ORDERED_NONE &&
ordered != QUEUE_ORDERED_DRAIN &&
ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
@@ -44,7 +36,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
q->ordered = ordered;
q->next_ordered = ordered;
q->prepare_flush_fn = prepare_flush_fn;
return 0;
}
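For driver call sites this API change is mechanical: the prepare_flush_fn argument disappears because the core now builds flush requests itself (queue_flush() below sets REQ_FLUSH directly). A hedged before/after sketch for a hypothetical driver queue, with my_prepare_flush_fn standing in for whatever callback the driver used to register; the brd update later in this series follows the same pattern:

	/* before this series: driver supplied a flush-request setup callback */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush_fn);

	/* after: no callback; the block layer tags flushes with REQ_FLUSH */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);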
@@ -79,7 +70,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
*
* http://thread.gmane.org/gmane.linux.kernel/537473
*/
if (!blk_fs_request(rq))
if (rq->cmd_type != REQ_TYPE_FS)
return QUEUE_ORDSEQ_DRAIN;
if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -143,10 +134,10 @@ static void queue_flush(struct request_queue *q, unsigned which)
}
blk_rq_init(q, rq);
rq->cmd_flags = REQ_HARDBARRIER;
rq->rq_disk = q->bar_rq.rq_disk;
rq->cmd_type = REQ_TYPE_FS;
rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
rq->rq_disk = q->orig_bar_rq->rq_disk;
rq->end_io = end_io;
q->prepare_flush_fn(q, rq);
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
@@ -203,7 +194,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
/* initialize proxy request and queue it */
blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
rq->cmd_flags |= REQ_WRITE;
if (q->ordered & QUEUE_ORDERED_DO_FUA)
rq->cmd_flags |= REQ_FUA;
init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -236,7 +227,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
(rq->cmd_flags & REQ_HARDBARRIER);
if (!q->ordseq) {
if (!is_barrier)
@@ -261,7 +253,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
*/
/* Special requests are not subject to ordering rules. */
if (!blk_fs_request(rq) &&
if (rq->cmd_type != REQ_TYPE_FS &&
rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
return true;
@@ -319,6 +311,15 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
if (!q)
return -ENXIO;
/*
* some block devices may not have their queue correctly set up here
* (e.g. loop device without a backing file) and so issuing a flush
* here will panic. Ensure there is a request function before issuing
* the barrier.
*/
if (!q->make_request_fn)
return -ENXIO;
bio = bio_alloc(gfp_mask, 0);
bio->bi_end_io = bio_end_empty_barrier;
bio->bi_bdev = bdev;
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
if (blk_pc_request(rq)) {
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
printk(KERN_INFO " cdb: ");
for (bit = 0; bit < BLK_MAX_CDB; bit++)
printk("%02x ", rq->cmd[bit]);
@@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
q->queue_flags = QUEUE_FLAG_DEFAULT;
q->queue_lock = lock;
@@ -1135,30 +1136,46 @@ void blk_put_request(struct request *req)
}
EXPORT_SYMBOL(blk_put_request);
/**
* blk_add_request_payload - add a payload to a request
* @rq: request to update
* @page: page backing the payload
* @len: length of the payload.
*
* This allows to later add a payload to an already submitted request by
* a block driver. The driver needs to take care of freeing the payload
* itself.
*
* Note that this is a quite horrible hack and nothing but handling of
* discard requests should ever use it.
*/
void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len)
{
struct bio *bio = rq->bio;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = len;
bio->bi_size = len;
bio->bi_vcnt = 1;
bio->bi_phys_segments = 1;
rq->__data_len = rq->resid_len = len;
rq->nr_phys_segments = 1;
rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
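As the comment above warns, only discard handling should use this. A hedged sketch of the intended call pattern, with a hypothetical prep function (my_prep_rq_fn is not from this series; the driver must free the page itself once the request completes):

	static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
	{
		if (rq->cmd_flags & REQ_DISCARD) {
			/* attach one zeroed sector as the payload */
			struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

			if (!page)
				return BLKPREP_DEFER;
			blk_add_request_payload(rq, page,
						queue_logical_block_size(q));
		}
		return BLKPREP_OK;
	}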
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cpu = bio->bi_comp_cpu;
req->cmd_type = REQ_TYPE_FS;
/*
* Inherit FAILFAST from bio (for read-ahead, and explicit
* FAILFAST). FAILFAST flags are identical for req and bio.
*/
if (bio_rw_flagged(bio, BIO_RW_AHEAD))
req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
if (bio->bi_rw & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
else
req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
if (bio_rw_flagged(bio, BIO_RW_DISCARD))
req->cmd_flags |= REQ_DISCARD;
if (bio_rw_flagged(bio, BIO_RW_BARRIER))
req->cmd_flags |= REQ_HARDBARRIER;
if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
req->cmd_flags |= REQ_RW_SYNC;
if (bio_rw_flagged(bio, BIO_RW_META))
req->cmd_flags |= REQ_RW_META;
if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
req->__sector = bio->bi_sector;
@@ -1181,12 +1198,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
const bool sync = (bio->bi_rw & REQ_SYNC);
const bool unplug = (bio->bi_rw & REQ_UNPLUG);
const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
if ((bio->bi_rw & REQ_HARDBARRIER) &&
(q->next_ordered == QUEUE_ORDERED_NONE)) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
@@ -1200,7 +1217,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
spin_lock_irq(q->queue_lock);
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1292,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
*/
rw_flags = bio_data_dir(bio);
if (sync)
rw_flags |= REQ_RW_SYNC;
rw_flags |= REQ_SYNC;
/*
* Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}
if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1497,8 +1514,7 @@ static inline void __generic_make_request(struct bio *bio)
if (bio_check_eod(bio, nr_sectors))
goto end_io;
if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
!blk_queue_discard(q)) {
if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -1583,7 +1599,7 @@ void submit_bio(int rw, struct bio *bio)
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1628,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
if (rq->cmd_flags & REQ_DISCARD)
return 0;
if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
@@ -1796,7 +1815,7 @@ struct request *blk_peek_request(struct request_queue *q)
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
if (blk_sorted_rq(rq))
if (rq->cmd_flags & REQ_SORTED)
elv_activate_rq(q, rq);
/*
@@ -1984,10 +2003,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* TODO: tj: This is too subtle. It would be better to let
* low level drivers do what they see fit.
*/
if (blk_fs_request(req))
if (req->cmd_type == REQ_TYPE_FS)
req->errors = 0;
if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
if (error && req->cmd_type == REQ_TYPE_FS &&
!(req->cmd_flags & REQ_QUIET)) {
printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2094,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
if (blk_fs_request(req) || blk_discard_rq(req))
if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
@@ -2111,11 +2131,32 @@ static bool blk_update_bidi_request(struct request *rq, int error,
blk_update_request(rq->next_rq, error, bidi_bytes))
return true;
add_disk_randomness(rq->rq_disk);
if (blk_queue_add_random(rq->q))
add_disk_randomness(rq->rq_disk);
return false;
}
/**
* blk_unprep_request - unprepare a request
* @req: the request
*
* This function makes a request ready for complete resubmission (or
* completion). It happens only after all error handling is complete,
* so represents the appropriate moment to deallocate any resources
* that were allocated to the request in the prep_rq_fn. The queue
* lock is held when calling this.
*/
void blk_unprep_request(struct request *req)
{
struct request_queue *q = req->q;
req->cmd_flags &= ~REQ_DONTPREP;
if (q->unprep_rq_fn)
q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);
/*
* queue lock must be held
*/
@@ -2126,11 +2167,15 @@ static void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && blk_fs_request(req))
if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
laptop_io_completion(&req->q->backing_dev_info);
blk_delete_timer(req);
if (req->cmd_flags & REQ_DONTPREP)
blk_unprep_request(req);
blk_account_io_done(req);
if (req->end_io)
@@ -2363,7 +2408,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= bio->bi_rw & REQ_RW;
rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
if (bio_has_data(bio)) {
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2450,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
if (src->cmd_flags & REQ_DISCARD)
dst->cmd_flags |= REQ_DISCARD;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__elv_add_request(q, rq, where, 1);
__generic_unplug_device(q);
/* the queue is stopped so it won't be plugged+unplugged */
if (blk_pm_resume_request(rq))
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
}
@@ -19,7 +19,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
if (bio->bi_private)
complete(bio->bi_private);
__free_page(bio_page(bio));
bio_put(bio);
}
@@ -42,8 +41,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct request_queue *q = bdev_get_queue(bdev);
int type = flags & BLKDEV_IFL_BARRIER ?
DISCARD_BARRIER : DISCARD_NOBARRIER;
unsigned int max_discard_sectors;
struct bio *bio;
struct page *page;
int ret = 0;
if (!q)
@@ -52,36 +51,30 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
while (nr_sects && !ret) {
unsigned int sector_size = q->limits.logical_block_size;
unsigned int max_discard_sectors =
min(q->limits.max_discard_sectors, UINT_MAX >> 9);
/*
* Ensure that max_discard_sectors is of the proper
* granularity
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
if (q->limits.discard_granularity) {
unsigned int disc_sects = q->limits.discard_granularity >> 9;
max_discard_sectors &= ~(disc_sects - 1);
}
while (nr_sects && !ret) {
bio = bio_alloc(gfp_mask, 1);
if (!bio)
goto out;
if (!bio) {
ret = -ENOMEM;
break;
}
bio->bi_sector = sector;
bio->bi_end_io = blkdev_discard_end_io;
bio->bi_bdev = bdev;
if (flags & BLKDEV_IFL_WAIT)
bio->bi_private = &wait;
/*
* Add a zeroed one-sector payload as that's what
* our current implementations need. If we'll ever need
* more the interface will need revisiting.
*/
page = alloc_page(gfp_mask | __GFP_ZERO);
if (!page)
goto out_free_bio;
if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
goto out_free_page;
/*
* And override the bio size - the way discard works we
* touch many more blocks on disk than the actual payload
* length.
*/
if (nr_sects > max_discard_sectors) {
bio->bi_size = max_discard_sectors << 9;
nr_sects -= max_discard_sectors;
@@ -103,13 +96,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
ret = -EIO;
bio_put(bio);
}
return ret;
out_free_page:
__free_page(page);
out_free_bio:
bio_put(bio);
out:
return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
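A typical synchronous caller, sketched with the flags visible in this file (BLKDEV_IFL_WAIT to block until completion); bdev, start, and nr_sects are hypothetical variables, with sectors in 512-byte units:

	int err = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
				       BLKDEV_IFL_WAIT);
	if (err == -EOPNOTSUPP)
		err = 0;	/* device has no discard support; not fatal */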
@@ -157,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
int ret = 0;
int ret;
struct bio *bio;
struct bio_batch bb;
unsigned int sz, issued = 0;
@@ -175,11 +163,14 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
return ret;
}
submit:
ret = 0;
while (nr_sects != 0) {
bio = bio_alloc(gfp_mask,
min(nr_sects, (sector_t)BIO_MAX_PAGES));
if (!bio)
if (!bio) {
ret = -ENOMEM;
break;
}
bio->bi_sector = sector;
bio->bi_bdev = bdev;
@@ -198,6 +189,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (ret < (sz << 9))
break;
}
ret = 0;
issued++;
submit_bio(WRITE, bio);
}
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
bio->bi_rw |= (1 << REQ_WRITE);
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
@@ -12,7 +12,6 @@
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
unsigned int phys_size;
struct bio_vec *bv, *bvprv = NULL;
int cluster, i, high, highprv = 1;
unsigned int seg_size, nr_phys_segs;
@@ -24,7 +23,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
fbio = bio;
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
seg_size = 0;
phys_size = nr_phys_segs = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
bio_for_each_segment(bv, bio, i) {
/*
@@ -180,7 +179,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
if (q->dma_drain_size && q->dma_drain_needed(rq)) {
if (rq->cmd_flags & REQ_RW)
if (rq->cmd_flags & REQ_WRITE)
memset(q->dma_drain_buffer, 0, q->dma_drain_size);
sg->page_link &= ~0x02;
@@ -226,7 +225,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
{
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = queue_max_sectors(q);
@@ -250,7 +249,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
{
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = queue_max_sectors(q);
@@ -36,6 +36,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
}
EXPORT_SYMBOL(blk_queue_prep_rq);
/**
* blk_queue_unprep_rq - set an unprepare_request function for queue
* @q: queue
* @ufn: unprepare_request function
*
* It's possible for a queue to register an unprepare_request callback
* which is invoked before the request is finally completed. The goal
* of the function is to deallocate any data that was allocated in the
* prepare_request callback.
*
*/
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);
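Paired with blk_unprep_request() above, usage is symmetric with the prep side: whatever the prep_rq_fn allocated for a REQ_DONTPREP request, the unprep_rq_fn frees once the request finishes. A hypothetical sketch (my_unprep_rq_fn and the use of rq->special are illustrative assumptions, not from this series):

	static void my_unprep_rq_fn(struct request_queue *q, struct request *rq)
	{
		kfree(rq->special);	/* allocated by the matching prep_rq_fn */
		rq->special = NULL;
	}

	/* at queue setup time */
	blk_queue_unprep_rq(q, my_unprep_rq_fn);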
/**
* blk_queue_merge_bvec - set a merge_bvec function for queue
* @q: queue
@@ -180,26 +180,36 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
return queue_var_show(!blk_queue_nonrot(q), page);
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_show_##name(struct request_queue *q, char *page) \
{ \
int bit; \
bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{ \
unsigned long val; \
ssize_t ret; \
ret = queue_var_store(&val, page, count); \
if (neg) \
val = !val; \
\
spin_lock_irq(q->queue_lock); \
if (val) \
queue_flag_set(QUEUE_FLAG_##flag, q); \
else \
queue_flag_clear(QUEUE_FLAG_##flag, q); \
spin_unlock_irq(q->queue_lock); \
return ret; \
}
static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
size_t count)
{
unsigned long nm;
ssize_t ret = queue_var_store(&nm, page, count);
spin_lock_irq(q->queue_lock);
if (nm)
queue_flag_clear(QUEUE_FLAG_NONROT, q);
else
queue_flag_set(QUEUE_FLAG_NONROT, q);
spin_unlock_irq(q->queue_lock);
return ret;
}
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
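Expanding the macro by hand shows what replaces the hand-written nonrot handlers deleted above; for QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) the generated show half is roughly (a sketch with whitespace normalized):

	static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
	{
		int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);

		/* neg == 1: the file reports "rotational", the inverse of NONROT */
		return queue_var_show(!bit, page);
	}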
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
@@ -250,27 +260,6 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
return ret;
}
static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
return queue_var_show(blk_queue_io_stat(q), page);
}
static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
size_t count)
{
unsigned long stats;
ssize_t ret = queue_var_store(&stats, page, count);
spin_lock_irq(q->queue_lock);
if (stats)
queue_flag_set(QUEUE_FLAG_IO_STAT, q);
else
queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
spin_unlock_irq(q->queue_lock);
return ret;
}
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -352,8 +341,8 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
static struct queue_sysfs_entry queue_nonrot_entry = {
.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
.show = queue_nonrot_show,
.store = queue_nonrot_store,
.show = queue_show_nonrot,
.store = queue_store_nonrot,
};
static struct queue_sysfs_entry queue_nomerges_entry = {
@@ -370,8 +359,14 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
static struct queue_sysfs_entry queue_iostats_entry = {
.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
.show = queue_iostats_show,
.store = queue_iostats_store,
.show = queue_show_iostats,
.store = queue_store_iostats,
};
static struct queue_sysfs_entry queue_random_entry = {
.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
.show = queue_show_random,
.store = queue_store_random,
};
static struct attribute *default_attrs[] = {
@@ -394,6 +389,7 @@ static struct attribute *default_attrs[] = {
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
&queue_random_entry.attr,
NULL,
};
@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
*/
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk && blk_rq_io_stat(rq) &&
(blk_fs_request(rq) || blk_discard_rq(rq));
return rq->rq_disk &&
(rq->cmd_flags & REQ_IO_STAT) &&
(rq->cmd_type == REQ_TYPE_FS ||
(rq->cmd_flags & REQ_DISCARD));
}
#endif
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
*/
static inline bool cfq_bio_sync(struct bio *bio)
{
return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}
/*
@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
if (rq_is_meta(rq1) && !rq_is_meta(rq2))
if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
return rq1;
else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
else if ((rq2->cmd_flags & REQ_META) &&
!(rq1->cmd_flags & REQ_META))
return rq2;
s1 = blk_rq_pos(rq1);
@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
cfqq->cfqd->rq_queued--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
if (rq_is_meta(rq)) {
if (rq->cmd_flags & REQ_META) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
}
@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
if (rq_is_meta(rq) && !cfqq->meta_pending)
if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
return true;
/*
@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_context *cic = RQ_CIC(rq);
cfqd->rq_queued++;
if (rq_is_meta(rq))
if (rq->cmd_flags & REQ_META)
cfqq->meta_pending++;
cfq_update_io_thinktime(cfqd, cic);
@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
unsigned long now;
now = jiffies;
cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
!!(rq->cmd_flags & REQ_NOIDLE));
cfq_update_hw_tag(cfqd);
@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_slice_expired(cfqd, 1);
else if (sync && cfqq_empty &&
!cfq_close_cooperator(cfqd, cfqq)) {
cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
cfqd->noidle_tree_requires_idle |=
!(rq->cmd_flags & REQ_NOIDLE);
/*
* Idling is enabled for SYNC_WORKLOAD.
* SYNC_NOIDLE_WORKLOAD idles at the end of the tree
* only if we processed at least one !rq_noidle request
* only if we processed at least one !REQ_NOIDLE request
*/
if (cfqd->serving_type == SYNC_WORKLOAD
|| cfqd->noidle_tree_requires_idle
@@ -535,56 +535,6 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
return err;
}
struct compat_blk_user_trace_setup {
char name[32];
u16 act_mask;
u32 buf_size;
u32 buf_nr;
compat_u64 start_lba;
compat_u64 end_lba;
u32 pid;
};
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
{
struct blk_user_trace_setup buts;
struct compat_blk_user_trace_setup cbuts;
struct request_queue *q;
char b[BDEVNAME_SIZE];
int ret;
q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
return -EFAULT;
bdevname(bdev, b);
buts = (struct blk_user_trace_setup) {
.act_mask = cbuts.act_mask,
.buf_size = cbuts.buf_size,
.buf_nr = cbuts.buf_nr,
.start_lba = cbuts.start_lba,
.end_lba = cbuts.end_lba,
.pid = cbuts.pid,
};
memcpy(&buts.name, &cbuts.name, 32);
mutex_lock(&bdev->bd_mutex);
ret = do_blk_trace_setup(q, b, bdev->bd_dev, bdev, &buts);
mutex_unlock(&bdev->bd_mutex);
if (ret)
return ret;
if (copy_to_user(arg, &buts.name, 32))
return -EFAULT;
return 0;
}
static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
@@ -802,16 +752,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return compat_put_u64(arg, bdev->bd_inode->i_size);
case BLKTRACESETUP32:
lock_kernel();
ret = compat_blk_trace_setup(bdev, compat_ptr(arg));
unlock_kernel();
return ret;
case BLKTRACESTART: /* compatible */
case BLKTRACESTOP: /* compatible */
case BLKTRACETEARDOWN: /* compatible */
lock_kernel();
ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
unlock_kernel();
return ret;
default:
if (disk->fops->compat_ioctl)
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
/*
* Don't merge file system requests and discard requests
*/
if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
return 0;
/*
@@ -428,7 +427,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
list_for_each_prev(entry, &q->queue_head) {
struct request *pos = list_entry_rq(entry);
if (blk_discard_rq(rq) != blk_discard_rq(pos))
if ((rq->cmd_flags & REQ_DISCARD) !=
(pos->cmd_flags & REQ_DISCARD))
break;
if (rq_data_dir(rq) != rq_data_dir(pos))
break;
@@ -558,7 +558,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq))
if (rq->cmd_flags & REQ_SORTED)
elv_deactivate_rq(q, rq);
}
@@ -644,7 +644,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
break;
case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
!(rq->cmd_flags & REQ_DISCARD));
rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
@@ -716,7 +717,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
/*
* toggle ordered color
*/
if (blk_barrier_rq(rq))
if (rq->cmd_flags & REQ_HARDBARRIER)
q->ordcolor ^= 1;
/*
@@ -729,7 +730,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
* this request is scheduling boundary, update
* end_sector
*/
if (blk_fs_request(rq) || blk_discard_rq(rq)) {
if (rq->cmd_type == REQ_TYPE_FS ||
(rq->cmd_flags & REQ_DISCARD)) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -843,7 +845,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
if ((rq->cmd_flags & REQ_SORTED) &&
e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}
@@ -163,18 +163,10 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
int ret;
if (disk->fops->ioctl)
return disk->fops->ioctl(bdev, mode, cmd, arg);
if (disk->fops->locked_ioctl) {
lock_kernel();
ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
unlock_kernel();
return ret;
}
return -ENOTTY;
}
/*
@@ -185,8 +177,7 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
/*
* always keep this in sync with compat_blkdev_ioctl() and
* compat_blkdev_locked_ioctl()
* always keep this in sync with compat_blkdev_ioctl()
*/
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
@@ -206,10 +197,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret != -EINVAL && ret != -ENOTTY)
return ret;
lock_kernel();
fsync_bdev(bdev);
invalidate_bdev(bdev);
unlock_kernel();
return 0;
case BLKROSET:
@@ -221,9 +210,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EACCES;
if (get_user(n, (int __user *)(arg)))
return -EFAULT;
lock_kernel();
set_device_ro(bdev, n);
unlock_kernel();
return 0;
case BLKDISCARD: {
@@ -309,14 +296,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
bd_release(bdev);
return ret;
case BLKPG:
lock_kernel();
ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
unlock_kernel();
break;
case BLKRRPART:
lock_kernel();
ret = blkdev_reread_part(bdev);
unlock_kernel();
break;
case BLKGETSIZE:
size = bdev->bd_inode->i_size;
@@ -329,9 +312,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKTRACESTOP:
case BLKTRACESETUP:
case BLKTRACETEARDOWN:
lock_kernel();
ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
unlock_kernel();
break;
default:
ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
@@ -1111,10 +1111,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
*/
static int atapi_drain_needed(struct request *rq)
{
if (likely(!blk_pc_request(rq)))
if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
return 0;
if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -79,23 +79,28 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode)
struct gendisk *disk = bdev->bd_disk;
DAC960_Controller_T *p = disk->queue->queuedata;
int drive_nr = (long)disk->private_data;
int ret = -ENXIO;
lock_kernel();
if (p->FirmwareType == DAC960_V1_Controller) {
if (p->V1.LogicalDriveInformation[drive_nr].
LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
return -ENXIO;
goto out;
} else {
DAC960_V2_LogicalDeviceInfo_T *i =
p->V2.LogicalDeviceInformation[drive_nr];
if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
return -ENXIO;
goto out;
}
check_disk_change(bdev);
if (!get_capacity(p->disks[drive_nr]))
return -ENXIO;
return 0;
goto out;
ret = 0;
out:
unlock_kernel();
return ret;
}
static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -60,6 +60,7 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/amifdreg.h>
#include <linux/amifd.h>
#include <linux/buffer_head.h>
@@ -1423,7 +1424,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
@@ -1500,6 +1501,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
}
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
lock_kernel();
ret = fd_locked_ioctl(bdev, mode, cmd, param);
unlock_kernel();
return ret;
}
static void fd_probe(int dev)
{
unsigned long code;
@@ -1542,10 +1555,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
int old_dev;
unsigned long flags;
lock_kernel();
old_dev = fd_device[drive];
if (fd_ref[drive] && old_dev != system)
if (fd_ref[drive] && old_dev != system) {
unlock_kernel();
return -EBUSY;
}
if (mode & (FMODE_READ|FMODE_WRITE)) {
check_disk_change(bdev);
@@ -1558,8 +1574,10 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
fd_deselect (drive);
rel_fdc();
if (wrprot)
if (wrprot) {
unlock_kernel();
return -EROFS;
}
}
}
@@ -1576,6 +1594,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
unit[drive].type->name, data_types[system].name);
unlock_kernel();
return 0;
}
@@ -1584,6 +1603,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
lock_kernel();
if (unit[drive].dirty == 1) {
del_timer (flush_track_timer + drive);
non_int_flush_track (drive);
@@ -1597,6 +1617,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
/* the mod_use counter is handled this way */
floppy_off (drive | 0x40000000);
#endif
unlock_kernel();
return 0;
}
@@ -1638,7 +1659,7 @@ static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
.locked_ioctl = fd_ioctl,
.ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.media_changed = amiga_floppy_change,
};
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/smp_lock.h>
#include "aoe.h"
static struct kmem_cache *buf_pool_cache;
@@ -124,13 +125,16 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
struct aoedev *d = bdev->bd_disk->private_data;
ulong flags;
lock_kernel();
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_UP) {
d->nopen++;
spin_unlock_irqrestore(&d->lock, flags);
unlock_kernel();
return 0;
}
spin_unlock_irqrestore(&d->lock, flags);
unlock_kernel();
return -ENODEV;
}
@@ -173,7 +177,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
BUG();
bio_endio(bio, -ENXIO);
return 0;
} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
} else if (bio->bi_rw & REQ_HARDBARRIER) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
} else if (bio->bi_io_vec == NULL) {
@@ -67,6 +67,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <asm/atafd.h>
#include <asm/atafdreg.h>
@@ -359,7 +360,7 @@ static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
static void redo_fd_request( void);
static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
cmd, unsigned long param);
static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
@@ -1480,7 +1481,7 @@ void do_fd_request(struct request_queue * q)
atari_enable_irq( IRQ_MFP_FDC );
}
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
struct gendisk *disk = bdev->bd_disk;
@@ -1665,6 +1666,17 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
}
}
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret;
lock_kernel();
ret = fd_locked_ioctl(bdev, mode, cmd, arg);
unlock_kernel();
return ret;
}
/* Initialize the 'unit' variable for drive 'drive' */
@@ -1838,24 +1850,36 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
int ret;
lock_kernel();
ret = floppy_open(bdev, mode);
unlock_kernel();
return ret;
}
static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct atari_floppy_struct *p = disk->private_data;
lock_kernel();
if (p->ref < 0)
p->ref = 0;
else if (!p->ref--) {
printk(KERN_ERR "floppy_release with fd_ref == 0");
p->ref = 0;
}
unlock_kernel();
return 0;
}
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.open = floppy_unlocked_open,
.release = floppy_release,
.locked_ioctl = fd_ioctl,
.ioctl = fd_ioctl,
.media_changed = check_floppy_change,
.revalidate_disk= floppy_revalidate,
};
@@ -15,6 +15,7 @@
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
#include <linux/slab.h>
@@ -340,7 +341,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
discard_from_brd(brd, sector, bio->bi_size);
goto out;
@@ -401,6 +402,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
* ram device BLKFLSBUF has special semantics, we want to actually
* release and destroy the ramdisk data.
*/
lock_kernel();
mutex_lock(&bdev->bd_mutex);
error = -EBUSY;
if (bdev->bd_openers <= 1) {
@@ -417,13 +419,14 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
error = 0;
}
mutex_unlock(&bdev->bd_mutex);
unlock_kernel();
return error;
}
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.locked_ioctl = brd_ioctl,
.ioctl = brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
.direct_access = brd_direct_access,
#endif
@@ -479,7 +482,7 @@ static struct brd_device *brd_alloc(int i)
if (!brd->brd_queue)
goto out_free_dev;
blk_queue_make_request(brd->brd_queue, brd_make_request);
blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
(2 file diffs collapsed)
@@ -52,8 +52,10 @@
/* Configuration Table */
#define CFGTBL_ChangeReq 0x00000001l
#define CFGTBL_AccCmds 0x00000001l
#define DOORBELL_CTLR_RESET 0x00000004l
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -173,12 +175,15 @@ typedef struct _SGDescriptor_struct {
* PAD_64 can be adjusted independently as needed for 32-bit
* and 64-bits systems.
*/
#define COMMANDLIST_ALIGNMENT (8)
#define COMMANDLIST_ALIGNMENT (32)
#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
#define DIRECT_LOOKUP_BIT 0x10
#define DIRECT_LOOKUP_SHIFT 5
typedef struct _CommandList_struct {
CommandListHeader_struct Header;
RequestBlock_struct Request;
@@ -195,7 +200,7 @@ typedef struct _CommandList_struct {
struct completion *waiting;
int retry_count;
void * scsi_cmd;
char pad[PADSIZE];
char pad[PADSIZE];
} CommandList_struct;
/* Configuration Table Structure */
@@ -209,12 +214,15 @@ typedef struct _HostWrite_struct {
typedef struct _CfgTable_struct {
BYTE Signature[4];
DWORD SpecValence;
#define SIMPLE_MODE 0x02
#define PERFORMANT_MODE 0x04
#define MEMQ_MODE 0x08
DWORD TransportSupport;
DWORD TransportActive;
HostWrite_struct HostWrite;
DWORD CmdsOutMax;
DWORD BusTypes;
DWORD Reserved;
DWORD TransMethodOffset;
BYTE ServerName[16];
DWORD HeartBeat;
DWORD SCSI_Prefetch;
@@ -222,6 +230,28 @@ typedef struct _CfgTable_struct {
DWORD MaxLogicalUnits;
DWORD MaxPhysicalDrives;
DWORD MaxPhysicalDrivesPerLogicalUnit;
DWORD MaxPerformantModeCommands;
u8 reserved[0x78 - 0x58];
u32 misc_fw_support; /* offset 0x78 */
#define MISC_FW_DOORBELL_RESET (0x02)
} CfgTable_struct;
struct TransTable_struct {
u32 BlockFetch0;
u32 BlockFetch1;
u32 BlockFetch2;
u32 BlockFetch3;
u32 BlockFetch4;
u32 BlockFetch5;
u32 BlockFetch6;
u32 BlockFetch7;
u32 RepQSize;
u32 RepQCount;
u32 RepQCtrAddrLow32;
u32 RepQCtrAddrHigh32;
u32 RepQAddr0Low32;
u32 RepQAddr0High32;
};
#pragma pack()
#endif /* CCISS_CMD_H */
(2 file diffs collapsed)
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
rw |= (1 << BIO_RW_BARRIER);
rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
rw |= REQ_HARDBARRIER;
rw |= REQ_UNPLUG | REQ_SYNC;
retry:
bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
/* check for unsupported barrier op.
* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0 */
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
/* Try again with no barrier */
dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
set_bit(MD_NO_BARRIER, &mdev->flags);
rw &= ~(1 << BIO_RW_BARRIER);
rw &= ~REQ_HARDBARRIER;
bio_put(bio);
goto retry;
}
(2 file diffs collapsed)
@@ -1557,10 +1557,6 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
sc.rate = DRBD_RATE_DEF;
sc.after = DRBD_AFTER_DEF;
sc.al_extents = DRBD_AL_EXTENTS_DEF;
sc.dp_volume = DRBD_DP_VOLUME_DEF;
sc.dp_interval = DRBD_DP_INTERVAL_DEF;
sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
} else
memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
(2 file diffs collapsed)
@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
* because of those XXX, this is not yet enabled,
* i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
*/
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
bio_endio(bio, -EOPNOTSUPP);
return 0;
(2 file diffs collapsed)
@@ -627,7 +627,7 @@ static void hd_request(void)
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, req->buffer);
#endif
if (blk_fs_request(req)) {
if (req->cmd_type == REQ_TYPE_FS) {
switch (rq_data_dir(req)) {
case READ:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
(99 file diffs collapsed)