Merge branch 'block-2.6.24' of git://git.kernel.dk/data/git/linux-2.6-block

* 'block-2.6.24' of git://git.kernel.dk/data/git/linux-2.6-block: (37 commits)
  [BLOCK] Fix failing compile with BLK_DEV_IO_TRACE=n
  compat_ioctl: move floppy handlers to block/compat_ioctl.c
  compat_ioctl: move cdrom handlers to block/compat_ioctl.c
  compat_ioctl: move BLKPG handling to block/compat_ioctl.c
  compat_ioctl: move hdio calls to block/compat_ioctl.c
  compat_ioctl: handle blk_trace ioctls
  compat_ioctl: add compat_blkdev_driver_ioctl()
  compat_ioctl: move common block ioctls to compat_blkdev_ioctl
  Sysace: Don't enable IRQ until after interrupt handler is registered
  Sysace: sparse fixes
  Sysace: Minor coding convention fixup
  drivers/block/umem: use DRIVER_NAME where appropriate
  drivers/block/umem: trim trailing whitespace
  drivers/block/umem: minor cleanups
  drivers/block/umem: use dev_printk()
  drivers/block/umem: move private include away from include/linux
  Sysace: Labels in C code should not be indented.
  Sysace: Add of_platform_bus binding
  Sysace: Move IRQ handler registration to occur after FSM is initialized
  Sysace: minor rework and cleanup changes
  ...
Linus Torvalds 2007-10-11 19:14:22 -07:00
commit 1ce4890428
72 changed files with 1731 additions and 1690 deletions


@ -477,9 +477,9 @@ With this multipage bio design:
the same bi_io_vec array, but with the index and size accordingly modified)
- A linked list of bios is used as before for unrelated merges (*) - this
avoids reallocs and makes independent completions easier to handle.
- Code that traverses the req list needs to make a distinction between
segments of a request (bio_for_each_segment) and the distinct completion
units/bios (rq_for_each_bio).
- Code that traverses the req list can find all the segments of a bio
by using rq_for_each_segment. This handles the fact that a request
has multiple bios, each of which can have multiple segments.
- Drivers which can't process a large bio in one shot can use the bi_idx
field to keep track of the next bio_vec entry to process.
(e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
@ -664,13 +664,13 @@ in lvm or md.
3.2.1 Traversing segments and completion units in a request
The macros bio_for_each_segment() and rq_for_each_bio() should be used for
traversing the bios in the request list (drivers should avoid directly
trying to do it themselves). Using these helpers should also make it easier
to cope with block changes in the future.
The macro rq_for_each_segment() should be used for traversing the bios
in the request list (drivers should avoid directly trying to do it
themselves). Using these helpers should also make it easier to cope
with block changes in the future.
rq_for_each_bio(bio, rq)
bio_for_each_segment(bio_vec, bio, i)
struct req_iterator iter;
rq_for_each_segment(bio_vec, rq, iter)
/* bio_vec is now current segment */
I/O completion callbacks are per-bio rather than per-segment, so drivers
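The documentation hunk above replaces the old rq_for_each_bio()/bio_for_each_segment() nesting with the single rq_for_each_segment() helper. As a minimal sketch of driver-side usage (the function name and the byte counting are illustrative, not part of the patch; rq_for_each_segment(), struct req_iterator and iter.bio are the helpers introduced by this series):

#include <linux/blkdev.h>

static unsigned int example_count_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	/* Walk every segment of every bio in the request; iter.bio
	 * exposes the bio the current segment belongs to, should the
	 * driver need per-bio state. */
	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec->bv_len;

	return bytes;
}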


@ -86,8 +86,15 @@ extern int sys_ioprio_get(int, int);
#error "Unsupported arch"
#endif
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio);
_syscall2(int, ioprio_get, int, which, int, who);
static inline int ioprio_set(int which, int who, int ioprio)
{
return syscall(__NR_ioprio_set, which, who, ioprio);
}
static inline int ioprio_get(int which, int who)
{
return syscall(__NR_ioprio_get, which, who);
}
enum {
IOPRIO_CLASS_NONE,
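The hunk above drops the obsolete _syscall3()/_syscall2() stubs from the ioprio sample program in favour of plain syscall() wrappers. A hedged sketch of how those wrappers could be exercised follows; it assumes the IOPRIO_* constants and the arch-specific __NR_ioprio_* definitions from the same documentation file, and the chosen class/level values are only an example:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* ioprio_set()/ioprio_get() are the wrappers defined above;
 * IOPRIO_WHO_PROCESS, IOPRIO_CLASS_BE and IOPRIO_CLASS_SHIFT come from
 * the surrounding Documentation/block/ioprio.txt sample code. */
int main(void)
{
	/* best-effort class, priority level 4, for the calling process */
	int ioprio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4;

	if (ioprio_set(IOPRIO_WHO_PROCESS, 0, ioprio) == -1)
		perror("ioprio_set");

	ioprio = ioprio_get(IOPRIO_WHO_PROCESS, 0);
	printf("class %d, level %d\n", ioprio >> IOPRIO_CLASS_SHIFT,
	       ioprio & ((1 << IOPRIO_CLASS_SHIFT) - 1));
	return 0;
}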


@ -4156,6 +4156,13 @@ W: http://oss.sgi.com/projects/xfs
T: git git://oss.sgi.com:8090/xfs/xfs-2.6.git
S: Supported
XILINX SYSTEMACE DRIVER
P: Grant Likely
M: grant.likely@secretlab.ca
W: http://www.secretlab.ca/
L: linux-kernel@vger.kernel.org
S: Maintained
XILINX UARTLITE SERIAL DRIVER
P: Peter Korsgaard
M: jacmet@sunsite.dk


@ -11,3 +11,4 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_COMPAT) += compat_ioctl.o


@ -312,33 +312,26 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
struct blk_user_trace_setup *buts)
{
struct blk_user_trace_setup buts;
struct blk_trace *old_bt, *bt = NULL;
struct dentry *dir = NULL;
char b[BDEVNAME_SIZE];
int ret, i;
if (copy_from_user(&buts, arg, sizeof(buts)))
return -EFAULT;
if (!buts.buf_size || !buts.buf_nr)
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
strcpy(buts.name, bdevname(bdev, b));
strcpy(buts->name, bdevname(bdev, b));
/*
* some device names have larger paths - convert the slashes
* to underscores for this to work as expected
*/
for (i = 0; i < strlen(buts.name); i++)
if (buts.name[i] == '/')
buts.name[i] = '_';
if (copy_to_user(arg, &buts, sizeof(buts)))
return -EFAULT;
for (i = 0; i < strlen(buts->name); i++)
if (buts->name[i] == '/')
buts->name[i] = '_';
ret = -ENOMEM;
bt = kzalloc(sizeof(*bt), GFP_KERNEL);
@ -350,7 +343,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
goto err;
ret = -ENOENT;
dir = blk_create_tree(buts.name);
dir = blk_create_tree(buts->name);
if (!dir)
goto err;
@ -363,20 +356,21 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
if (!bt->dropped_file)
goto err;
bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks, bt);
bt->rchan = relay_open("trace", dir, buts->buf_size,
buts->buf_nr, &blk_relay_callbacks, bt);
if (!bt->rchan)
goto err;
bt->act_mask = buts.act_mask;
bt->act_mask = buts->act_mask;
if (!bt->act_mask)
bt->act_mask = (u16) -1;
bt->start_lba = buts.start_lba;
bt->end_lba = buts.end_lba;
bt->start_lba = buts->start_lba;
bt->end_lba = buts->end_lba;
if (!bt->end_lba)
bt->end_lba = -1ULL;
bt->pid = buts.pid;
bt->pid = buts->pid;
bt->trace_state = Blktrace_setup;
ret = -EBUSY;
@ -401,6 +395,26 @@ err:
return ret;
}
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
int ret;
ret = copy_from_user(&buts, arg, sizeof(buts));
if (ret)
return -EFAULT;
ret = do_blk_trace_setup(q, bdev, &buts);
if (ret)
return ret;
if (copy_to_user(arg, &buts, sizeof(buts)))
return -EFAULT;
return 0;
}
static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;

block/compat_ioctl.c (new file, 814 lines)

@ -0,0 +1,814 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blktrace_api.h>
#include <linux/cdrom.h>
#include <linux/compat.h>
#include <linux/elevator.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/syscalls.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/uaccess.h>
static int compat_put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)compat_ptr(arg));
}
static int compat_put_int(unsigned long arg, int val)
{
return put_user(val, (compat_int_t __user *)compat_ptr(arg));
}
static int compat_put_long(unsigned long arg, long val)
{
return put_user(val, (compat_long_t __user *)compat_ptr(arg));
}
static int compat_put_ulong(unsigned long arg, compat_ulong_t val)
{
return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
static int compat_put_u64(unsigned long arg, u64 val)
{
return put_user(val, (compat_u64 __user *)compat_ptr(arg));
}
struct compat_hd_geometry {
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
u32 start;
};
static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
struct compat_hd_geometry __user *ugeo)
{
struct hd_geometry geo;
int ret;
if (!ugeo)
return -EINVAL;
if (!disk->fops->getgeo)
return -ENOTTY;
/*
* We need to set the startsect first, the driver may
* want to override it.
*/
geo.start = get_start_sect(bdev);
ret = disk->fops->getgeo(bdev, &geo);
if (ret)
return ret;
ret = copy_to_user(ugeo, &geo, 4);
ret |= __put_user(geo.start, &ugeo->start);
if (ret)
ret = -EFAULT;
return ret;
}
static int compat_hdio_ioctl(struct inode *inode, struct file *file,
struct gendisk *disk, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
unsigned long kval;
unsigned int __user *uvp;
int error;
set_fs(KERNEL_DS);
error = blkdev_driver_ioctl(inode, file, disk,
cmd, (unsigned long)(&kval));
set_fs(old_fs);
if (error == 0) {
uvp = compat_ptr(arg);
if (put_user(kval, uvp))
error = -EFAULT;
}
return error;
}
struct compat_cdrom_read_audio {
union cdrom_addr addr;
u8 addr_format;
compat_int_t nframes;
compat_caddr_t buf;
};
struct compat_cdrom_generic_command {
unsigned char cmd[CDROM_PACKET_SIZE];
compat_caddr_t buffer;
compat_uint_t buflen;
compat_int_t stat;
compat_caddr_t sense;
unsigned char data_direction;
compat_int_t quiet;
compat_int_t timeout;
compat_caddr_t reserved[1];
};
static int compat_cdrom_read_audio(struct inode *inode, struct file *file,
struct gendisk *disk, unsigned int cmd, unsigned long arg)
{
struct cdrom_read_audio __user *cdread_audio;
struct compat_cdrom_read_audio __user *cdread_audio32;
__u32 data;
void __user *datap;
cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
cdread_audio32 = compat_ptr(arg);
if (copy_in_user(&cdread_audio->addr,
&cdread_audio32->addr,
(sizeof(*cdread_audio32) -
sizeof(compat_caddr_t))))
return -EFAULT;
if (get_user(data, &cdread_audio32->buf))
return -EFAULT;
datap = compat_ptr(data);
if (put_user(datap, &cdread_audio->buf))
return -EFAULT;
return blkdev_driver_ioctl(inode, file, disk, cmd,
(unsigned long)cdread_audio);
}
static int compat_cdrom_generic_command(struct inode *inode, struct file *file,
struct gendisk *disk, unsigned int cmd, unsigned long arg)
{
struct cdrom_generic_command __user *cgc;
struct compat_cdrom_generic_command __user *cgc32;
u32 data;
unsigned char dir;
int itmp;
cgc = compat_alloc_user_space(sizeof(*cgc));
cgc32 = compat_ptr(arg);
if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
get_user(data, &cgc32->buffer) ||
put_user(compat_ptr(data), &cgc->buffer) ||
copy_in_user(&cgc->buflen, &cgc32->buflen,
(sizeof(unsigned int) + sizeof(int))) ||
get_user(data, &cgc32->sense) ||
put_user(compat_ptr(data), &cgc->sense) ||
get_user(dir, &cgc32->data_direction) ||
put_user(dir, &cgc->data_direction) ||
get_user(itmp, &cgc32->quiet) ||
put_user(itmp, &cgc->quiet) ||
get_user(itmp, &cgc32->timeout) ||
put_user(itmp, &cgc->timeout) ||
get_user(data, &cgc32->reserved[0]) ||
put_user(compat_ptr(data), &cgc->reserved[0]))
return -EFAULT;
return blkdev_driver_ioctl(inode, file, disk, cmd, (unsigned long)cgc);
}
struct compat_blkpg_ioctl_arg {
compat_int_t op;
compat_int_t flags;
compat_int_t datalen;
compat_caddr_t data;
};
static int compat_blkpg_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32)
{
struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
compat_caddr_t udata;
compat_int_t n;
int err;
err = get_user(n, &ua32->op);
err |= put_user(n, &a->op);
err |= get_user(n, &ua32->flags);
err |= put_user(n, &a->flags);
err |= get_user(n, &ua32->datalen);
err |= put_user(n, &a->datalen);
err |= get_user(udata, &ua32->data);
err |= put_user(compat_ptr(udata), &a->data);
if (err)
return err;
return blkdev_ioctl(inode, file, cmd, (unsigned long)a);
}
#define BLKBSZGET_32 _IOR(0x12, 112, int)
#define BLKBSZSET_32 _IOW(0x12, 113, int)
#define BLKGETSIZE64_32 _IOR(0x12, 114, int)
struct compat_floppy_struct {
compat_uint_t size;
compat_uint_t sect;
compat_uint_t head;
compat_uint_t track;
compat_uint_t stretch;
unsigned char gap;
unsigned char rate;
unsigned char spec1;
unsigned char fmt_gap;
const compat_caddr_t name;
};
struct compat_floppy_drive_params {
char cmos;
compat_ulong_t max_dtr;
compat_ulong_t hlt;
compat_ulong_t hut;
compat_ulong_t srt;
compat_ulong_t spinup;
compat_ulong_t spindown;
unsigned char spindown_offset;
unsigned char select_delay;
unsigned char rps;
unsigned char tracks;
compat_ulong_t timeout;
unsigned char interleave_sect;
struct floppy_max_errors max_errors;
char flags;
char read_track;
short autodetect[8];
compat_int_t checkfreq;
compat_int_t native_format;
};
struct compat_floppy_drive_struct {
signed char flags;
compat_ulong_t spinup_date;
compat_ulong_t select_date;
compat_ulong_t first_read_date;
short probed_format;
short track;
short maxblock;
short maxtrack;
compat_int_t generation;
compat_int_t keep_data;
compat_int_t fd_ref;
compat_int_t fd_device;
compat_int_t last_checked;
compat_caddr_t dmabuf;
compat_int_t bufblocks;
};
struct compat_floppy_fdc_state {
compat_int_t spec1;
compat_int_t spec2;
compat_int_t dtr;
unsigned char version;
unsigned char dor;
compat_ulong_t address;
unsigned int rawcmd:2;
unsigned int reset:1;
unsigned int need_configure:1;
unsigned int perp_mode:2;
unsigned int has_fifo:1;
unsigned int driver_version;
unsigned char track[4];
};
struct compat_floppy_write_errors {
unsigned int write_errors;
compat_ulong_t first_error_sector;
compat_int_t first_error_generation;
compat_ulong_t last_error_sector;
compat_int_t last_error_generation;
compat_uint_t badness;
};
#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct)
#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
static struct {
unsigned int cmd32;
unsigned int cmd;
} fd_ioctl_trans_table[] = {
{ FDSETPRM32, FDSETPRM },
{ FDDEFPRM32, FDDEFPRM },
{ FDGETPRM32, FDGETPRM },
{ FDSETDRVPRM32, FDSETDRVPRM },
{ FDGETDRVPRM32, FDGETDRVPRM },
{ FDGETDRVSTAT32, FDGETDRVSTAT },
{ FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
{ FDGETFDCSTAT32, FDGETFDCSTAT },
{ FDWERRORGET32, FDWERRORGET }
};
#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
static int compat_fd_ioctl(struct inode *inode, struct file *file,
struct gendisk *disk, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
void *karg = NULL;
unsigned int kcmd = 0;
int i, err;
for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
if (cmd == fd_ioctl_trans_table[i].cmd32) {
kcmd = fd_ioctl_trans_table[i].cmd;
break;
}
if (!kcmd)
return -EINVAL;
switch (cmd) {
case FDSETPRM32:
case FDDEFPRM32:
case FDGETPRM32:
{
compat_uptr_t name;
struct compat_floppy_struct __user *uf;
struct floppy_struct *f;
uf = compat_ptr(arg);
f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
if (!karg)
return -ENOMEM;
if (cmd == FDGETPRM32)
break;
err = __get_user(f->size, &uf->size);
err |= __get_user(f->sect, &uf->sect);
err |= __get_user(f->head, &uf->head);
err |= __get_user(f->track, &uf->track);
err |= __get_user(f->stretch, &uf->stretch);
err |= __get_user(f->gap, &uf->gap);
err |= __get_user(f->rate, &uf->rate);
err |= __get_user(f->spec1, &uf->spec1);
err |= __get_user(f->fmt_gap, &uf->fmt_gap);
err |= __get_user(name, &uf->name);
f->name = compat_ptr(name);
if (err) {
err = -EFAULT;
goto out;
}
break;
}
case FDSETDRVPRM32:
case FDGETDRVPRM32:
{
struct compat_floppy_drive_params __user *uf;
struct floppy_drive_params *f;
uf = compat_ptr(arg);
f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
if (!karg)
return -ENOMEM;
if (cmd == FDGETDRVPRM32)
break;
err = __get_user(f->cmos, &uf->cmos);
err |= __get_user(f->max_dtr, &uf->max_dtr);
err |= __get_user(f->hlt, &uf->hlt);
err |= __get_user(f->hut, &uf->hut);
err |= __get_user(f->srt, &uf->srt);
err |= __get_user(f->spinup, &uf->spinup);
err |= __get_user(f->spindown, &uf->spindown);
err |= __get_user(f->spindown_offset, &uf->spindown_offset);
err |= __get_user(f->select_delay, &uf->select_delay);
err |= __get_user(f->rps, &uf->rps);
err |= __get_user(f->tracks, &uf->tracks);
err |= __get_user(f->timeout, &uf->timeout);
err |= __get_user(f->interleave_sect, &uf->interleave_sect);
err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
err |= __get_user(f->flags, &uf->flags);
err |= __get_user(f->read_track, &uf->read_track);
err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
err |= __get_user(f->checkfreq, &uf->checkfreq);
err |= __get_user(f->native_format, &uf->native_format);
if (err) {
err = -EFAULT;
goto out;
}
break;
}
case FDGETDRVSTAT32:
case FDPOLLDRVSTAT32:
karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
if (!karg)
return -ENOMEM;
break;
case FDGETFDCSTAT32:
karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
if (!karg)
return -ENOMEM;
break;
case FDWERRORGET32:
karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
if (!karg)
return -ENOMEM;
break;
default:
return -EINVAL;
}
set_fs(KERNEL_DS);
err = blkdev_driver_ioctl(inode, file, disk, kcmd, (unsigned long)karg);
set_fs(old_fs);
if (err)
goto out;
switch (cmd) {
case FDGETPRM32:
{
struct floppy_struct *f = karg;
struct compat_floppy_struct __user *uf = compat_ptr(arg);
err = __put_user(f->size, &uf->size);
err |= __put_user(f->sect, &uf->sect);
err |= __put_user(f->head, &uf->head);
err |= __put_user(f->track, &uf->track);
err |= __put_user(f->stretch, &uf->stretch);
err |= __put_user(f->gap, &uf->gap);
err |= __put_user(f->rate, &uf->rate);
err |= __put_user(f->spec1, &uf->spec1);
err |= __put_user(f->fmt_gap, &uf->fmt_gap);
err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
break;
}
case FDGETDRVPRM32:
{
struct compat_floppy_drive_params __user *uf;
struct floppy_drive_params *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->cmos, &uf->cmos);
err |= __put_user(f->max_dtr, &uf->max_dtr);
err |= __put_user(f->hlt, &uf->hlt);
err |= __put_user(f->hut, &uf->hut);
err |= __put_user(f->srt, &uf->srt);
err |= __put_user(f->spinup, &uf->spinup);
err |= __put_user(f->spindown, &uf->spindown);
err |= __put_user(f->spindown_offset, &uf->spindown_offset);
err |= __put_user(f->select_delay, &uf->select_delay);
err |= __put_user(f->rps, &uf->rps);
err |= __put_user(f->tracks, &uf->tracks);
err |= __put_user(f->timeout, &uf->timeout);
err |= __put_user(f->interleave_sect, &uf->interleave_sect);
err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
err |= __put_user(f->flags, &uf->flags);
err |= __put_user(f->read_track, &uf->read_track);
err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
err |= __put_user(f->checkfreq, &uf->checkfreq);
err |= __put_user(f->native_format, &uf->native_format);
break;
}
case FDGETDRVSTAT32:
case FDPOLLDRVSTAT32:
{
struct compat_floppy_drive_struct __user *uf;
struct floppy_drive_struct *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->flags, &uf->flags);
err |= __put_user(f->spinup_date, &uf->spinup_date);
err |= __put_user(f->select_date, &uf->select_date);
err |= __put_user(f->first_read_date, &uf->first_read_date);
err |= __put_user(f->probed_format, &uf->probed_format);
err |= __put_user(f->track, &uf->track);
err |= __put_user(f->maxblock, &uf->maxblock);
err |= __put_user(f->maxtrack, &uf->maxtrack);
err |= __put_user(f->generation, &uf->generation);
err |= __put_user(f->keep_data, &uf->keep_data);
err |= __put_user(f->fd_ref, &uf->fd_ref);
err |= __put_user(f->fd_device, &uf->fd_device);
err |= __put_user(f->last_checked, &uf->last_checked);
err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
break;
}
case FDGETFDCSTAT32:
{
struct compat_floppy_fdc_state __user *uf;
struct floppy_fdc_state *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->spec1, &uf->spec1);
err |= __put_user(f->spec2, &uf->spec2);
err |= __put_user(f->dtr, &uf->dtr);
err |= __put_user(f->version, &uf->version);
err |= __put_user(f->dor, &uf->dor);
err |= __put_user(f->address, &uf->address);
err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
(char *)&f->address + sizeof(f->address), sizeof(int));
err |= __put_user(f->driver_version, &uf->driver_version);
err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
break;
}
case FDWERRORGET32:
{
struct compat_floppy_write_errors __user *uf;
struct floppy_write_errors *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->write_errors, &uf->write_errors);
err |= __put_user(f->first_error_sector, &uf->first_error_sector);
err |= __put_user(f->first_error_generation, &uf->first_error_generation);
err |= __put_user(f->last_error_sector, &uf->last_error_sector);
err |= __put_user(f->last_error_generation, &uf->last_error_generation);
err |= __put_user(f->badness, &uf->badness);
break;
}
default:
break;
}
if (err)
err = -EFAULT;
out:
kfree(karg);
return err;
}
struct compat_blk_user_trace_setup {
char name[32];
u16 act_mask;
u32 buf_size;
u32 buf_nr;
compat_u64 start_lba;
compat_u64 end_lba;
u32 pid;
};
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
{
struct blk_user_trace_setup buts;
struct compat_blk_user_trace_setup cbuts;
struct request_queue *q;
int ret;
q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
return -EFAULT;
buts = (struct blk_user_trace_setup) {
.act_mask = cbuts.act_mask,
.buf_size = cbuts.buf_size,
.buf_nr = cbuts.buf_nr,
.start_lba = cbuts.start_lba,
.end_lba = cbuts.end_lba,
.pid = cbuts.pid,
};
memcpy(&buts.name, &cbuts.name, 32);
mutex_lock(&bdev->bd_mutex);
ret = do_blk_trace_setup(q, bdev, &buts);
mutex_unlock(&bdev->bd_mutex);
if (ret)
return ret;
if (copy_to_user(arg, &buts.name, 32))
return -EFAULT;
return 0;
}
static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
struct gendisk *disk, unsigned cmd, unsigned long arg)
{
int ret;
switch (arg) {
case HDIO_GET_UNMASKINTR:
case HDIO_GET_MULTCOUNT:
case HDIO_GET_KEEPSETTINGS:
case HDIO_GET_32BIT:
case HDIO_GET_NOWERR:
case HDIO_GET_DMA:
case HDIO_GET_NICE:
case HDIO_GET_WCACHE:
case HDIO_GET_ACOUSTIC:
case HDIO_GET_ADDRESS:
case HDIO_GET_BUSSTATE:
return compat_hdio_ioctl(inode, file, disk, cmd, arg);
case FDSETPRM32:
case FDDEFPRM32:
case FDGETPRM32:
case FDSETDRVPRM32:
case FDGETDRVPRM32:
case FDGETDRVSTAT32:
case FDPOLLDRVSTAT32:
case FDGETFDCSTAT32:
case FDWERRORGET32:
return compat_fd_ioctl(inode, file, disk, cmd, arg);
case CDROMREADAUDIO:
return compat_cdrom_read_audio(inode, file, disk, cmd, arg);
case CDROM_SEND_PACKET:
return compat_cdrom_generic_command(inode, file, disk, cmd, arg);
/*
* No handler required for the ones below, we just need to
* convert arg to a 64 bit pointer.
*/
case BLKSECTSET:
/*
* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
* Some need translations, these do not.
*/
case HDIO_GET_IDENTITY:
case HDIO_DRIVE_TASK:
case HDIO_DRIVE_CMD:
case HDIO_SCAN_HWIF:
/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
case 0x330:
/* 0x02 -- Floppy ioctls */
case FDMSGON:
case FDMSGOFF:
case FDSETEMSGTRESH:
case FDFLUSH:
case FDWERRORCLR:
case FDSETMAXERRS:
case FDGETMAXERRS:
case FDGETDRVTYP:
case FDEJECT:
case FDCLRPRM:
case FDFMTBEG:
case FDFMTEND:
case FDRESET:
case FDTWADDLE:
case FDFMTTRK:
case FDRAWCMD:
/* CDROM stuff */
case CDROMPAUSE:
case CDROMRESUME:
case CDROMPLAYMSF:
case CDROMPLAYTRKIND:
case CDROMREADTOCHDR:
case CDROMREADTOCENTRY:
case CDROMSTOP:
case CDROMSTART:
case CDROMEJECT:
case CDROMVOLCTRL:
case CDROMSUBCHNL:
case CDROMMULTISESSION:
case CDROM_GET_MCN:
case CDROMRESET:
case CDROMVOLREAD:
case CDROMSEEK:
case CDROMPLAYBLK:
case CDROMCLOSETRAY:
case CDROM_DISC_STATUS:
case CDROM_CHANGER_NSLOTS:
case CDROM_GET_CAPABILITY:
/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
* not take a struct cdrom_read, instead they take a struct cdrom_msf
* which is compatible.
*/
case CDROMREADMODE2:
case CDROMREADMODE1:
case CDROMREADRAW:
case CDROMREADCOOKED:
case CDROMREADALL:
/* DVD ioctls */
case DVD_READ_STRUCT:
case DVD_WRITE_STRUCT:
case DVD_AUTH:
arg = (unsigned long)compat_ptr(arg);
/* These interpret arg as an unsigned long, not as a pointer,
 * so we must not do compat_ptr() conversion. */
case HDIO_SET_MULTCOUNT:
case HDIO_SET_UNMASKINTR:
case HDIO_SET_KEEPSETTINGS:
case HDIO_SET_32BIT:
case HDIO_SET_NOWERR:
case HDIO_SET_DMA:
case HDIO_SET_PIO_MODE:
case HDIO_SET_NICE:
case HDIO_SET_WCACHE:
case HDIO_SET_ACOUSTIC:
case HDIO_SET_BUSSTATE:
case HDIO_SET_ADDRESS:
case CDROMEJECT_SW:
case CDROM_SET_OPTIONS:
case CDROM_CLEAR_OPTIONS:
case CDROM_SELECT_SPEED:
case CDROM_SELECT_DISC:
case CDROM_MEDIA_CHANGED:
case CDROM_DRIVE_STATUS:
case CDROM_LOCKDOOR:
case CDROM_DEBUG:
break;
default:
/* unknown ioctl number */
return -ENOIOCTLCMD;
}
if (disk->fops->unlocked_ioctl)
return disk->fops->unlocked_ioctl(file, cmd, arg);
if (disk->fops->ioctl) {
lock_kernel();
ret = disk->fops->ioctl(inode, file, cmd, arg);
unlock_kernel();
return ret;
}
return -ENOTTY;
}
static int compat_blkdev_locked_ioctl(struct inode *inode, struct file *file,
struct block_device *bdev,
unsigned cmd, unsigned long arg)
{
struct backing_dev_info *bdi;
switch (cmd) {
case BLKRAGET:
case BLKFRAGET:
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
return -ENOTTY;
return compat_put_long(arg,
(bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return compat_put_int(arg, block_size(bdev));
case BLKSSZGET: /* get block device hardware sector size */
return compat_put_int(arg, bdev_hardsect_size(bdev));
case BLKSECTGET:
return compat_put_ushort(arg,
bdev_get_queue(bdev)->max_sectors);
case BLKRASET: /* compatible, but no compat_ptr (!) */
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
return -ENOTTY;
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
return 0;
case BLKGETSIZE:
if ((bdev->bd_inode->i_size >> 9) > ~0UL)
return -EFBIG;
return compat_put_ulong(arg, bdev->bd_inode->i_size >> 9);
case BLKGETSIZE64_32:
return compat_put_u64(arg, bdev->bd_inode->i_size);
case BLKTRACESETUP32:
return compat_blk_trace_setup(bdev, compat_ptr(arg));
case BLKTRACESTART: /* compatible */
case BLKTRACESTOP: /* compatible */
case BLKTRACETEARDOWN: /* compatible */
return blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
}
return -ENOIOCTLCMD;
}
/* Most of the generic ioctls are handled in the normal fallback path.
This assumes the blkdev's low level compat_ioctl always returns
ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
int ret = -ENOIOCTLCMD;
struct inode *inode = file->f_mapping->host;
struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
switch (cmd) {
case HDIO_GETGEO:
return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
case BLKFLSBUF:
case BLKROSET:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us
*/
case BLKRRPART:
return blkdev_ioctl(inode, file, cmd,
(unsigned long)compat_ptr(arg));
case BLKBSZSET_32:
return blkdev_ioctl(inode, file, BLKBSZSET,
(unsigned long)compat_ptr(arg));
case BLKPG:
return compat_blkpg_ioctl(inode, file, cmd, compat_ptr(arg));
}
lock_kernel();
ret = compat_blkdev_locked_ioctl(inode, file, bdev, cmd, arg);
/* FIXME: why do we assume -> compat_ioctl needs the BKL? */
if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(file, cmd, arg);
unlock_kernel();
if (ret != -ENOIOCTLCMD)
return ret;
return compat_blkdev_driver_ioctl(inode, file, disk, cmd, arg);
}
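compat_blkdev_ioctl() above assumes that a driver's compat_ioctl method returns -ENOIOCTLCMD for commands it does not recognize, so the generic fallback in compat_blkdev_driver_ioctl() can take over. A minimal, hypothetical driver-side sketch of that convention (the driver name and command number are invented for illustration):

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#define EXAMPLEDRV_GET_STATE _IOR(0xEE, 1, int)	/* hypothetical ioctl */

static long exampledrv_compat_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	switch (cmd) {
	case EXAMPLEDRV_GET_STATE:
		/* 32-bit user pointers must be converted with compat_ptr() */
		return put_user(1, (int __user *)compat_ptr(arg));
	default:
		/* unknown: let the block layer's fallback path handle it */
		return -ENOIOCTLCMD;
	}
}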


@ -217,6 +217,10 @@ int blkdev_driver_ioctl(struct inode *inode, struct file *file,
}
EXPORT_SYMBOL_GPL(blkdev_driver_ioctl);
/*
* always keep this in sync with compat_blkdev_ioctl() and
* compat_blkdev_locked_ioctl()
*/
int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
unsigned long arg)
{
@ -284,21 +288,4 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
}
/* Most of the generic ioctls are handled in the normal fallback path.
This assumes the blkdev's low level compat_ioctl always returns
ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
int ret = -ENOIOCTLCMD;
if (disk->fops->compat_ioctl) {
lock_kernel();
ret = disk->fops->compat_ioctl(file, cmd, arg);
unlock_kernel();
}
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);


@ -42,6 +42,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
static void blk_recalc_rq_segments(struct request *rq);
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
/*
* For the allocated request tables
@ -428,7 +431,6 @@ static void queue_flush(struct request_queue *q, unsigned which)
static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;
@ -525,56 +527,36 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
return 1;
}
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
* this request again with the original bi_end_io after an
* error occurs or post flush is complete.
*/
q->bi_size += bytes;
if (bio->bi_size)
return 1;
/* Reset bio */
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_size = q->bi_size;
bio->bi_sector -= (q->bi_size >> 9);
q->bi_size = 0;
return 0;
}
static int ordered_bio_endio(struct request *rq, struct bio *bio,
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
if (&q->bar_rq != rq)
return 0;
if (&q->bar_rq != rq) {
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
if (unlikely(nbytes > bio->bi_size)) {
printk("%s: want %u bytes done, only %u left\n",
__FUNCTION__, nbytes, bio->bi_size);
nbytes = bio->bi_size;
}
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
if (bio->bi_size == 0)
bio_endio(bio, error);
} else {
/*
* Okay, this is the barrier request in progress, dry finish it.
* Okay, this is the barrier request in progress, just
* record the error;
*/
if (error && !q->orderr)
q->orderr = error;
endio = bio->bi_end_io;
private = bio->bi_private;
bio->bi_end_io = flush_dry_bio_endio;
bio->bi_private = q;
bio_endio(bio, nbytes, error);
bio->bi_end_io = endio;
bio->bi_private = private;
return 1;
}
}
/**
@ -1220,16 +1202,40 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
int high, highprv = 1;
struct request rq;
struct bio *nxt = bio->bi_next;
rq.q = q;
rq.bio = rq.biotail = bio;
bio->bi_next = NULL;
blk_recalc_rq_segments(&rq);
bio->bi_next = nxt;
bio->bi_phys_segments = rq.nr_phys_segments;
bio->bi_hw_segments = rq.nr_hw_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
if (unlikely(!bio->bi_io_vec))
static void blk_recalc_rq_segments(struct request *rq)
{
int nr_phys_segs;
int nr_hw_segs;
unsigned int phys_size;
unsigned int hw_size;
struct bio_vec *bv, *bvprv = NULL;
int seg_size;
int hw_seg_size;
int cluster;
struct req_iterator iter;
int high, highprv = 1;
struct request_queue *q = rq->q;
if (!rq->bio)
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
bio_for_each_segment(bv, bio, i) {
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
@ -1255,12 +1261,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
}
new_segment:
if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
hw_seg_size += bv->bv_len;
} else {
else {
new_hw_segment:
if (hw_seg_size > bio->bi_hw_front_size)
bio->bi_hw_front_size = hw_seg_size;
if (nr_hw_segs == 1 &&
hw_seg_size > rq->bio->bi_hw_front_size)
rq->bio->bi_hw_front_size = hw_seg_size;
hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
nr_hw_segs++;
}
@ -1270,15 +1277,15 @@ new_hw_segment:
seg_size = bv->bv_len;
highprv = high;
}
if (hw_seg_size > bio->bi_hw_back_size)
bio->bi_hw_back_size = hw_seg_size;
if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
bio->bi_hw_front_size = hw_seg_size;
bio->bi_phys_segments = nr_phys_segs;
bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
if (nr_hw_segs == 1 &&
hw_seg_size > rq->bio->bi_hw_front_size)
rq->bio->bi_hw_front_size = hw_seg_size;
if (hw_seg_size > rq->biotail->bi_hw_back_size)
rq->biotail->bi_hw_back_size = hw_seg_size;
rq->nr_phys_segments = nr_phys_segs;
rq->nr_hw_segments = nr_hw_segs;
}
EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
@ -1325,8 +1332,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
int nsegs, i, cluster;
struct req_iterator iter;
int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@ -1335,11 +1342,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* for each bio in rq
*/
bvprv = NULL;
rq_for_each_bio(bio, rq) {
/*
* for each segment in bio
*/
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
@ -1362,8 +1365,7 @@ new_segment:
nsegs++;
}
bvprv = bvec;
} /* segments in bio */
} /* bios in rq */
} /* segments in rq */
return nsegs;
}
@ -1420,7 +1422,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
return 1;
}
int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
static int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
int len;
@ -1456,7 +1459,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *b
return ll_new_hw_segment(q, req, bio);
}
EXPORT_SYMBOL(ll_back_merge_fn);
static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
@ -2346,6 +2348,23 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
}
return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
@ -2377,23 +2396,12 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
*/
bio_get(bio);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio)) {
ret = -EINVAL;
goto unmap_bio;
} else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
}
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
return bio->bi_size;
unmap_bio:
/* if it was bounced we must call the end io function */
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
@ -2502,7 +2510,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return PTR_ERR(bio);
if (bio->bi_size != len) {
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
bio_unmap_user(bio);
return -EINVAL;
}
@ -2912,15 +2920,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
req->nr_phys_segments = bio_phys_segments(req->q, bio);
req->nr_hw_segments = bio_hw_segments(req->q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->bio = req->biotail = bio;
req->ioprio = bio_prio(bio);
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
blk_rq_bio_prep(req->q, req, bio);
}
static int __make_request(struct request_queue *q, struct bio *bio)
@ -3038,7 +3040,7 @@ out:
return 0;
end_io:
bio_endio(bio, nr_sectors << 9, err);
bio_endio(bio, err);
return 0;
}
@ -3185,7 +3187,7 @@ static inline void __generic_make_request(struct bio *bio)
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
end_io:
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
break;
}
@ -3329,48 +3331,6 @@ void submit_bio(int rw, struct bio *bio)
EXPORT_SYMBOL(submit_bio);
static void blk_recalc_rq_segments(struct request *rq)
{
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
unsigned int phys_size, hw_size;
struct request_queue *q = rq->q;
if (!rq->bio)
return;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_bio(bio, rq) {
/* Force bio hw/phys segs to be recalculated. */
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
nr_phys_segs += bio_phys_segments(q, bio);
nr_hw_segs += bio_hw_segments(q, bio);
if (prevbio) {
int pseg = phys_size + prevbio->bi_size + bio->bi_size;
int hseg = hw_size + prevbio->bi_size + bio->bi_size;
if (blk_phys_contig_segment(q, prevbio, bio) &&
pseg <= q->max_segment_size) {
nr_phys_segs--;
phys_size += prevbio->bi_size + bio->bi_size;
} else
phys_size = 0;
if (blk_hw_contig_segment(q, prevbio, bio) &&
hseg <= q->max_segment_size) {
nr_hw_segs--;
hw_size += prevbio->bi_size + bio->bi_size;
} else
hw_size = 0;
}
prevbio = bio;
}
rq->nr_phys_segments = nr_phys_segs;
rq->nr_hw_segments = nr_hw_segs;
}
static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq)) {
@ -3442,8 +3402,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
if (!ordered_bio_endio(req, bio, nbytes, error))
bio_endio(bio, nbytes, error);
req_bio_endio(req, bio, nbytes, error);
next_idx = 0;
bio_nbytes = 0;
} else {
@ -3498,8 +3457,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
* if the request wasn't completed, update state
*/
if (bio_nbytes) {
if (!ordered_bio_endio(req, bio, bio_nbytes, error))
bio_endio(bio, bio_nbytes, error);
req_bio_endio(req, bio, bio_nbytes, error);
bio->bi_idx += next_idx;
bio_iovec(bio)->bv_offset += nr_bytes;
bio_iovec(bio)->bv_len -= nr_bytes;
@ -3574,7 +3532,7 @@ static void blk_done_softirq(struct softirq_action *h)
}
}
static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
/*
@ -3595,7 +3553,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
}
static struct notifier_block __devinitdata blk_cpu_notifier = {
static struct notifier_block blk_cpu_notifier __cpuinitdata = {
.notifier_call = blk_cpu_notify,
};
@ -3680,7 +3638,7 @@ void end_request(struct request *req, int uptodate)
EXPORT_SYMBOL(end_request);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
@ -3695,9 +3653,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
}
EXPORT_SYMBOL(blk_rq_bio_prep);
if (bio->bi_bdev)
rq->rq_disk = bio->bi_bdev->bd_disk;
}
int kblockd_schedule_work(struct work_struct *work)
{


@ -138,7 +138,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
buf = mempool_alloc(d->bufpool, GFP_NOIO);
if (buf == NULL) {
printk(KERN_INFO "aoe: buf allocation failure\n");
bio_endio(bio, bio->bi_size, -ENOMEM);
bio_endio(bio, -ENOMEM);
return 0;
}
memset(buf, 0, sizeof(*buf));
@ -159,7 +159,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
d->aoemajor, d->aoeminor);
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -ENXIO);
bio_endio(bio, -ENXIO);
return 0;
}


@ -652,7 +652,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
disk_stat_add(disk, sectors[rw], n_sect);
disk_stat_add(disk, io_ticks, duration);
n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
bio_endio(buf->bio, buf->bio->bi_size, n);
bio_endio(buf->bio, n);
mempool_free(buf, d->bufpool);
}
}


@ -119,7 +119,7 @@ aoedev_downdev(struct aoedev *d)
bio = buf->bio;
if (--buf->nframesout == 0) {
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
}
skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
}
@ -130,7 +130,7 @@ aoedev_downdev(struct aoedev *d)
list_del(d->bufq.next);
bio = buf->bio;
mempool_free(buf, d->bufpool);
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
}
if (d->gd)


@ -1194,7 +1194,7 @@ static inline void complete_buffers(struct bio *bio, int status)
int nr_sectors = bio_sectors(bio);
bio->bi_next = NULL;
bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
bio_endio(bio, status ? 0 : -EIO);
bio = xbh;
}
}


@ -987,7 +987,7 @@ static inline void complete_buffers(struct bio *bio, int ok)
xbh = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
bio_endio(bio, ok ? 0 : -EIO);
bio = xbh;
}


@ -2437,23 +2437,20 @@ static void rw_interrupt(void)
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
struct bio *bio;
struct bio_vec *bv;
int size, i;
int size;
struct req_iterator iter;
char *base;
base = bio_data(current_req->bio);
size = 0;
rq_for_each_bio(bio, current_req) {
bio_for_each_segment(bv, bio, i) {
if (page_address(bv->bv_page) + bv->bv_offset !=
base + size)
rq_for_each_segment(bv, current_req, iter) {
if (page_address(bv->bv_page) + bv->bv_offset != base + size)
break;
size += bv->bv_len;
}
}
return size >> 9;
}
@ -2479,9 +2476,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec *bv;
struct bio *bio;
char *buffer, *dma_buffer;
int size, i;
int size;
struct req_iterator iter;
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
@ -2514,8 +2511,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
size = current_req->current_nr_sectors << 9;
rq_for_each_bio(bio, current_req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
break;
@ -2551,7 +2547,6 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
remaining -= size;
dma_buffer += size;
}
}
#ifdef FLOPPY_SANITY_CHECK
if (remaining) {
if (remaining > 0)
@ -3815,14 +3810,10 @@ static int check_floppy_change(struct gendisk *disk)
* a disk in the drive, and whether that disk is writable.
*/
static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
static void floppy_rb0_complete(struct bio *bio,
int err)
{
if (bio->bi_size)
return 1;
complete((struct completion *)bio->bi_private);
return 0;
}
static int __floppy_read_block_0(struct block_device *bdev)


@ -142,12 +142,11 @@ static irqreturn_t lgb_irq(int irq, void *_bd)
* return the total length. */
static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
{
unsigned int i = 0, idx, len = 0;
struct bio *bio;
rq_for_each_bio(bio, req) {
unsigned int i = 0, len = 0;
struct req_iterator iter;
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, idx) {
rq_for_each_segment(bvec, req, iter) {
/* We told the block layer not to give us too many. */
BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
/* If we had a zero-length segment, it would look like
@ -161,7 +160,6 @@ static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
len += bvec->bv_len;
i++;
}
}
/* If the array isn't full, we mark the end with a 0 length */
if (i < LGUEST_MAX_DMA_SECTIONS)
dma->len[i] = 0;


@ -551,7 +551,7 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
out:
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio, old_bio->bi_size);
bio_io_error(old_bio);
return 0;
}
@ -580,7 +580,7 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
bio_put(bio);
} else {
int ret = do_bio_filebacked(lo, bio);
bio_endio(bio, bio->bi_size, ret);
bio_endio(bio, ret);
}
}


@ -180,7 +180,7 @@ static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, i, flags;
int result, flags;
struct nbd_request request;
unsigned long size = req->nr_sectors << 9;
struct socket *sock = lo->sock;
@ -205,30 +205,26 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
}
if (nbd_cmd(req) == NBD_CMD_WRITE) {
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, req, iter) {
flags = 0;
if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
if (!rq_iter_last(req, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req,
bvec->bv_len);
lo->disk->disk_name, req, bvec->bv_len);
result = sock_send_bvec(sock, bvec, flags);
if (result <= 0) {
printk(KERN_ERR "%s: Send data failed (result %d)\n",
lo->disk->disk_name,
result);
lo->disk->disk_name, result);
goto error_out;
}
}
}
}
return 0;
error_out:
@ -321,16 +317,14 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
dprintk(DBG_RX, "%s: request %p: got reply\n",
lo->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
int i;
struct bio *bio;
rq_for_each_bio(bio, req) {
struct req_iterator iter;
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, req, iter) {
result = sock_recv_bvec(sock, bvec);
if (result <= 0) {
printk(KERN_ERR "%s: Receive data failed (result %d)\n",
lo->disk->disk_name,
result);
lo->disk->disk_name, result);
req->errors++;
return req;
}
@ -338,7 +332,6 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
lo->disk->disk_name, req, bvec->bv_len);
}
}
}
return req;
harderror:
lo->harderror = result;


@ -1058,15 +1058,12 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
}
}
static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_read(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
if (bio->bi_size)
return 1;
VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
@ -1077,19 +1074,14 @@ static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
wake_up(&pd->wqueue);
}
pkt_bio_finished(pd);
return 0;
}
static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
struct packet_data *pkt = bio->bi_private;
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
if (bio->bi_size)
return 1;
VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
pd->stats.pkt_ended++;
@ -1098,7 +1090,6 @@ static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int
atomic_dec(&pkt->io_wait);
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
return 0;
}
/*
@ -1470,7 +1461,7 @@ static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
while (bio) {
next = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
bio_endio(bio, uptodate ? 0 : -EIO);
bio = next;
}
pkt->orig_bios = pkt->orig_bios_tail = NULL;
@ -2462,19 +2453,15 @@ static int pkt_close(struct inode *inode, struct file *file)
}
static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
if (bio->bi_size)
return 1;
bio_put(bio);
bio_endio(psd->bio, psd->bio->bi_size, err);
bio_endio(psd->bio, err);
mempool_free(psd, psd_pool);
pkt_bio_finished(pd);
return 0;
}
static int pkt_make_request(struct request_queue *q, struct bio *bio)
@ -2620,7 +2607,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
}
return 0;
end_io:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}


@ -91,30 +91,29 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct request *req, int gather)
{
unsigned int offset = 0;
struct bio *bio;
sector_t sector;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned int i = 0, j;
unsigned int i = 0;
size_t size;
void *buf;
rq_for_each_bio(bio, req) {
sector = bio->bi_sector;
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
__func__, __LINE__, i, bio_segments(bio),
bio_sectors(bio), sector);
bio_for_each_segment(bvec, bio, j) {
__func__, __LINE__, i, bio_segments(iter.bio),
bio_sectors(iter.bio),
(unsigned long)iter.bio->bi_sector);
size = bvec->bv_len;
buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
buf = bvec_kmap_irq(bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
__bio_kunmap_atomic(bio, KM_IRQ0);
}
flush_kernel_dcache_page(bvec->bv_page);
bvec_kunmap_irq(bvec, &flags);
i++;
}
}
@ -130,12 +129,13 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
#ifdef DEBUG
unsigned int n = 0;
struct bio *bio;
struct bio_vec *bv;
struct req_iterator iter;
rq_for_each_bio(bio, req)
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
__func__, __LINE__, op, n, req->nr_sectors,
req->hard_nr_sectors);
#endif


@ -287,10 +287,10 @@ static int rd_make_request(struct request_queue *q, struct bio *bio)
if (ret)
goto fail;
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
return 0;
fail:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}


@ -52,7 +52,7 @@
#include <linux/fcntl.h> /* O_ACCMODE */
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/umem.h>
#include "umem.h"
#include <asm/uaccess.h>
#include <asm/io.h>
@ -67,6 +67,7 @@
* Version Information
*/
#define DRIVER_NAME "umem"
#define DRIVER_VERSION "v2.3"
#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown"
#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver"
@ -97,15 +98,9 @@ static int major_nr;
#include <linux/blkpg.h>
struct cardinfo {
int card_number;
struct pci_dev *dev;
int irq;
unsigned long csr_base;
unsigned char __iomem *csr_remap;
unsigned long csr_len;
unsigned int win_size; /* PCI window size */
unsigned int mm_size; /* size in kbytes */
unsigned int init_size; /* initial segment, in sectors,
@ -113,6 +108,8 @@ struct cardinfo {
* have been written
*/
struct bio *bio, *currentbio, **biotail;
int current_idx;
sector_t current_sector;
struct request_queue *queue;
@ -121,6 +118,7 @@ struct cardinfo {
struct mm_dma_desc *desc;
int cnt, headcnt;
struct bio *bio, **biotail;
int idx;
} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
@ -233,7 +231,7 @@ static void dump_regs(struct cardinfo *card)
*/
static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
{
printk(KERN_DEBUG "MM%d*: DMAstat - ", card->card_number);
dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
if (dmastat & DMASCR_ANY_ERR)
printk("ANY_ERR ");
if (dmastat & DMASCR_MBE_ERR)
@ -380,12 +378,16 @@ static int add_bio(struct cardinfo *card)
dma_addr_t dma_handle;
int offset;
struct bio *bio;
struct bio_vec *vec;
int idx;
int rw;
int len;
bio = card->currentbio;
if (!bio && card->bio) {
card->currentbio = card->bio;
card->current_idx = card->bio->bi_idx;
card->current_sector = card->bio->bi_sector;
card->bio = card->bio->bi_next;
if (card->bio == NULL)
card->biotail = &card->bio;
@ -394,15 +396,17 @@ static int add_bio(struct cardinfo *card)
}
if (!bio)
return 0;
idx = card->current_idx;
rw = bio_rw(bio);
if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
return 0;
len = bio_iovec(bio)->bv_len;
vec = bio_iovec_idx(bio, idx);
len = vec->bv_len;
dma_handle = pci_map_page(card->dev,
bio_page(bio),
bio_offset(bio),
vec->bv_page,
vec->bv_offset,
len,
(rw==READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@ -410,6 +414,8 @@ static int add_bio(struct cardinfo *card)
p = &card->mm_pages[card->Ready];
desc = &p->desc[p->cnt];
p->cnt++;
if (p->bio == NULL)
p->idx = idx;
if ((p->biotail) != &bio->bi_next) {
*(p->biotail) = bio;
p->biotail = &(bio->bi_next);
@ -419,7 +425,7 @@ static int add_bio(struct cardinfo *card)
desc->data_dma_handle = dma_handle;
desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
desc->local_addr= cpu_to_le64(bio->bi_sector << 9);
desc->local_addr = cpu_to_le64(card->current_sector << 9);
desc->transfer_size = cpu_to_le32(len);
offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc));
desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
@ -435,10 +441,10 @@ static int add_bio(struct cardinfo *card)
desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
desc->sem_control_bits = desc->control_bits;
bio->bi_sector += (len>>9);
bio->bi_size -= len;
bio->bi_idx++;
if (bio->bi_idx >= bio->bi_vcnt)
card->current_sector += (len >> 9);
idx++;
card->current_idx = idx;
if (idx >= bio->bi_vcnt)
card->currentbio = NULL;
return 1;
@ -474,10 +480,12 @@ static void process_page(unsigned long data)
last=1;
}
page->headcnt++;
idx = bio->bi_phys_segments;
bio->bi_phys_segments++;
if (bio->bi_phys_segments >= bio->bi_vcnt)
idx = page->idx;
page->idx++;
if (page->idx >= bio->bi_vcnt) {
page->bio = bio->bi_next;
page->idx = page->bio->bi_idx;
}
pci_unmap_page(card->dev, desc->data_dma_handle,
bio_iovec_idx(bio,idx)->bv_len,
@ -486,8 +494,8 @@ static void process_page(unsigned long data)
if (control & DMASCR_HARD_ERROR) {
/* error */
clear_bit(BIO_UPTODATE, &bio->bi_flags);
printk(KERN_WARNING "MM%d: I/O error on sector %d/%d\n",
card->card_number,
dev_printk(KERN_WARNING, &card->dev->dev,
"I/O error on sector %d/%d\n",
le32_to_cpu(desc->local_addr)>>9,
le32_to_cpu(desc->transfer_size));
dump_dmastat(card, control);
@ -495,8 +503,8 @@ static void process_page(unsigned long data)
le32_to_cpu(desc->local_addr)>>9 == card->init_size) {
card->init_size += le32_to_cpu(desc->transfer_size)>>9;
if (card->init_size>>1 >= card->mm_size) {
printk(KERN_INFO "MM%d: memory now initialised\n",
card->card_number);
dev_printk(KERN_INFO, &card->dev->dev,
"memory now initialised\n");
set_userbit(card, MEMORY_INITIALIZED, 1);
}
}
@ -532,7 +540,7 @@ static void process_page(unsigned long data)
return_bio = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
}
}
@ -547,7 +555,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
pr_debug("mm_make_request %llu %u\n",
(unsigned long long)bio->bi_sector, bio->bi_size);
bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
spin_lock_irq(&card->lock);
*card->biotail = bio;
bio->bi_next = NULL;
@ -606,46 +613,51 @@ HW_TRACE(0x30);
dump_dmastat(card, dma_status);
if (stat & 0x01)
printk(KERN_ERR "MM%d*: Memory access error detected (err count %d)\n",
card->card_number, count);
dev_printk(KERN_ERR, &card->dev->dev,
"Memory access error detected (err count %d)\n",
count);
if (stat & 0x02)
printk(KERN_ERR "MM%d*: Multi-bit EDC error\n",
card->card_number);
dev_printk(KERN_ERR, &card->dev->dev,
"Multi-bit EDC error\n");
printk(KERN_ERR "MM%d*: Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
card->card_number, addr_log2, addr_log1, data_log2, data_log1);
printk(KERN_ERR "MM%d*: Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
card->card_number, check, syndrome);
dev_printk(KERN_ERR, &card->dev->dev,
"Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
addr_log2, addr_log1, data_log2, data_log1);
dev_printk(KERN_ERR, &card->dev->dev,
"Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
check, syndrome);
writeb(0, card->csr_remap + ERROR_COUNT);
}
if (dma_status & DMASCR_PARITY_ERR_REP) {
printk(KERN_ERR "MM%d*: PARITY ERROR REPORTED\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev,
"PARITY ERROR REPORTED\n");
pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
}
if (dma_status & DMASCR_PARITY_ERR_DET) {
printk(KERN_ERR "MM%d*: PARITY ERROR DETECTED\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev,
"PARITY ERROR DETECTED\n");
pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
}
if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
printk(KERN_ERR "MM%d*: SYSTEM ERROR\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n");
pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
}
if (dma_status & DMASCR_TARGET_ABT) {
printk(KERN_ERR "MM%d*: TARGET ABORT\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n");
pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
}
if (dma_status & DMASCR_MASTER_ABT) {
printk(KERN_ERR "MM%d*: MASTER ABORT\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n");
pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
}
@ -696,20 +708,20 @@ static int check_battery(struct cardinfo *card, int battery, int status)
card->battery[battery].last_change = jiffies;
if (card->battery[battery].good) {
printk(KERN_ERR "MM%d: Battery %d now good\n",
card->card_number, battery + 1);
dev_printk(KERN_ERR, &card->dev->dev,
"Battery %d now good\n", battery + 1);
card->battery[battery].warned = 0;
} else
printk(KERN_ERR "MM%d: Battery %d now FAILED\n",
card->card_number, battery + 1);
dev_printk(KERN_ERR, &card->dev->dev,
"Battery %d now FAILED\n", battery + 1);
return 1;
} else if (!card->battery[battery].good &&
!card->battery[battery].warned &&
time_after_eq(jiffies, card->battery[battery].last_change +
(HZ * 60 * 60 * 5))) {
printk(KERN_ERR "MM%d: Battery %d still FAILED after 5 hours\n",
card->card_number, battery + 1);
dev_printk(KERN_ERR, &card->dev->dev,
"Battery %d still FAILED after 5 hours\n", battery + 1);
card->battery[battery].warned = 1;
return 1;
@ -733,8 +745,8 @@ static void check_batteries(struct cardinfo *card)
status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
if (debug & DEBUG_BATTERY_POLLING)
printk(KERN_DEBUG "MM%d: checking battery status, 1 = %s, 2 = %s\n",
card->card_number,
dev_printk(KERN_DEBUG, &card->dev->dev,
"checking battery status, 1 = %s, 2 = %s\n",
(status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
(status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
@ -853,45 +865,56 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
unsigned char mem_present;
unsigned char batt_status;
unsigned int saved_bar, data;
unsigned long csr_base;
unsigned long csr_len;
int magic_number;
static int printed_version;
if (pci_enable_device(dev) < 0)
return -ENODEV;
if (!printed_version++)
printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
ret = pci_enable_device(dev);
if (ret)
return ret;
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
pci_set_master(dev);
card->dev = dev;
card->card_number = num_cards;
card->csr_base = pci_resource_start(dev, 0);
card->csr_len = pci_resource_len(dev, 0);
csr_base = pci_resource_start(dev, 0);
csr_len = pci_resource_len(dev, 0);
if (!csr_base || !csr_len)
return -ENODEV;
printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
card->card_number, dev->bus->number, dev->devfn);
dev_printk(KERN_INFO, &dev->dev,
"Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards);
dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
return -ENOMEM;
}
if (!request_mem_region(card->csr_base, card->csr_len, "Micro Memory")) {
printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
ret = -ENOMEM;
ret = pci_request_regions(dev, DRIVER_NAME);
if (ret) {
dev_printk(KERN_ERR, &card->dev->dev,
"Unable to request memory region\n");
goto failed_req_csr;
}
card->csr_remap = ioremap_nocache(card->csr_base, card->csr_len);
card->csr_remap = ioremap_nocache(csr_base, csr_len);
if (!card->csr_remap) {
printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev,
"Unable to remap memory region\n");
ret = -ENOMEM;
goto failed_remap_csr;
}
printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number,
card->csr_base, card->csr_remap, card->csr_len);
dev_printk(KERN_INFO, &card->dev->dev,
"CSR 0x%08lx -> 0x%p (0x%lx)\n",
csr_base, card->csr_remap, csr_len);
switch(card->dev->device) {
case 0x5415:
@ -915,7 +938,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
}
if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
printk(KERN_ERR "MM%d: Magic number invalid\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n");
ret = -ENOMEM;
goto failed_magic;
}
@ -928,7 +951,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
&card->mm_pages[1].page_dma);
if (card->mm_pages[0].desc == NULL ||
card->mm_pages[1].desc == NULL) {
printk(KERN_ERR "MM%d: alloc failed\n", card->card_number);
dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
goto failed_alloc;
}
reset_page(&card->mm_pages[0]);
@ -983,11 +1006,12 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
card->battery[0].last_change = card->battery[1].last_change = jiffies;
if (card->flags & UM_FLAG_NO_BATT)
printk(KERN_INFO "MM%d: Size %d KB\n",
card->card_number, card->mm_size);
dev_printk(KERN_INFO, &card->dev->dev,
"Size %d KB\n", card->mm_size);
else {
printk(KERN_INFO "MM%d: Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
card->card_number, card->mm_size,
dev_printk(KERN_INFO, &card->dev->dev,
"Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
card->mm_size,
(batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"),
card->battery[0].good ? "OK" : "FAILURE",
(batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"),
@ -1005,19 +1029,16 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
data = ~data;
data += 1;
card->win_size = data;
if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, "pci-umem", card)) {
printk(KERN_ERR "MM%d: Unable to allocate IRQ\n", card->card_number);
if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, card)) {
dev_printk(KERN_ERR, &card->dev->dev,
"Unable to allocate IRQ\n");
ret = -ENODEV;
goto failed_req_irq;
}
card->irq = dev->irq;
printk(KERN_INFO "MM%d: Window size %d bytes, IRQ %d\n", card->card_number,
card->win_size, card->irq);
dev_printk(KERN_INFO, &card->dev->dev,
"Window size %d bytes, IRQ %d\n", data, dev->irq);
spin_lock_init(&card->lock);
@ -1037,10 +1058,12 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
num_cards++;
if (!get_userbit(card, MEMORY_INITIALIZED)) {
printk(KERN_INFO "MM%d: memory NOT initialized. Consider over-writing whole device.\n", card->card_number);
dev_printk(KERN_INFO, &card->dev->dev,
"memory NOT initialized. Consider over-writing whole device.\n");
card->init_size = 0;
} else {
printk(KERN_INFO "MM%d: memory already initialized\n", card->card_number);
dev_printk(KERN_INFO, &card->dev->dev,
"memory already initialized\n");
card->init_size = card->mm_size;
}
@ -1062,7 +1085,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
failed_magic:
iounmap(card->csr_remap);
failed_remap_csr:
release_mem_region(card->csr_base, card->csr_len);
pci_release_regions(dev);
failed_req_csr:
return ret;
@ -1077,9 +1100,8 @@ static void mm_pci_remove(struct pci_dev *dev)
struct cardinfo *card = pci_get_drvdata(dev);
tasklet_kill(&card->tasklet);
free_irq(dev->irq, card);
iounmap(card->csr_remap);
release_mem_region(card->csr_base, card->csr_len);
free_irq(card->irq, card);
if (card->mm_pages[0].desc)
pci_free_consistent(card->dev, PAGE_SIZE*2,
@ -1090,6 +1112,9 @@ static void mm_pci_remove(struct pci_dev *dev)
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
blk_cleanup_queue(card->queue);
pci_release_regions(dev);
pci_disable_device(dev);
}
static const struct pci_device_id mm_pci_ids[] = {
@ -1109,11 +1134,12 @@ static const struct pci_device_id mm_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, mm_pci_ids);
static struct pci_driver mm_pci_driver = {
.name = "umem",
.name = DRIVER_NAME,
.id_table = mm_pci_ids,
.probe = mm_pci_probe,
.remove = mm_pci_remove,
};
/*
-----------------------------------------------------------------------------------
-- mm_init
@ -1125,13 +1151,11 @@ static int __init mm_init(void)
int retval, i;
int err;
printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
retval = pci_register_driver(&mm_pci_driver);
if (retval)
return -ENOMEM;
err = major_nr = register_blkdev(0, "umem");
err = major_nr = register_blkdev(0, DRIVER_NAME);
if (err < 0) {
pci_unregister_driver(&mm_pci_driver);
return -EIO;
@ -1157,13 +1181,13 @@ static int __init mm_init(void)
}
init_battery_timer();
printk("MM: desc_per_page = %ld\n", DESC_PER_PAGE);
printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE);
/* printk("mm_init: Done. 10-19-01 9:00\n"); */
return 0;
out:
pci_unregister_driver(&mm_pci_driver);
unregister_blkdev(major_nr, "umem");
unregister_blkdev(major_nr, DRIVER_NAME);
while (i--)
put_disk(mm_gendisk[i]);
return -ENOMEM;
@ -1186,7 +1210,7 @@ static void __exit mm_cleanup(void)
pci_unregister_driver(&mm_pci_driver);
unregister_blkdev(major_nr, "umem");
unregister_blkdev(major_nr, DRIVER_NAME);
}
module_init(mm_init);
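Most of the remaining hunks in this driver are a mechanical logging conversion: the "MM%d:" card number baked into every format string goes away and dev_printk() is used instead, which derives the message prefix from the PCI device. A hedged before/after sketch of the pattern (the message text is illustrative only):

	/* old style: card number carried in each format string */
	printk(KERN_ERR "MM%d: something failed\n", card->card_number);

	/* new style: dev_printk() prefixes the message with the device name */
	dev_printk(KERN_ERR, &card->dev->dev, "something failed\n");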

View File

@ -125,11 +125,6 @@ struct mm_dma_desc {
__le64 sem_control_bits;
} __attribute__((aligned(8)));
#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415
#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425
#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155
/* bits for card->flags */
#define UM_FLAG_DMA_IN_REGS 1
#define UM_FLAG_NO_BYTE_STATUS 2

View File

@ -150,9 +150,8 @@ static int blkif_queue_request(struct request *req)
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
struct blkif_request *ring_req;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bvec;
int idx;
unsigned long id;
unsigned int fsect, lsect;
int ref;
@ -186,10 +185,8 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
ring_req->nr_segments = 0;
rq_for_each_bio (bio, req) {
bio_for_each_segment (bvec, bio, idx) {
BUG_ON(ring_req->nr_segments
== BLKIF_MAX_SEGMENTS_PER_REQUEST);
rq_for_each_segment(bvec, req, iter) {
BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
fsect = bvec->bv_offset >> 9;
lsect = fsect + (bvec->bv_len >> 9) - 1;
@ -214,7 +211,6 @@ static int blkif_queue_request(struct request *req)
ring_req->nr_segments++;
}
}
info->ring.req_prod_pvt++;
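This is the same conversion applied across the series: the nested rq_for_each_bio()/bio_for_each_segment() walk collapses into a single rq_for_each_segment() loop driven by a struct req_iterator. A minimal, hedged sketch of the new pattern in a request handler (process_segment() is a made-up helper):

	struct req_iterator iter;
	struct bio_vec *bvec;

	/* visit every segment of every bio in the request */
	rq_for_each_segment(bvec, req, iter) {
		void *buf = page_address(bvec->bv_page) + bvec->bv_offset;
		process_segment(buf, bvec->bv_len);
	}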

View File

@ -91,6 +91,10 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/platform_device.h>
#if defined(CONFIG_OF)
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Xilinx SystemACE device driver");
@ -158,6 +162,9 @@ MODULE_LICENSE("GPL");
#define ACE_FIFO_SIZE (32)
#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
#define ACE_BUS_WIDTH_8 0
#define ACE_BUS_WIDTH_16 1
struct ace_reg_ops;
struct ace_device {
@ -188,7 +195,7 @@ struct ace_device {
/* Details of hardware device */
unsigned long physaddr;
void *baseaddr;
void __iomem *baseaddr;
int irq;
int bus_width; /* 0 := 8 bit; 1 := 16 bit */
struct ace_reg_ops *reg_ops;
@ -220,20 +227,20 @@ struct ace_reg_ops {
/* 8 Bit bus width */
static u16 ace_in_8(struct ace_device *ace, int reg)
{
void *r = ace->baseaddr + reg;
void __iomem *r = ace->baseaddr + reg;
return in_8(r) | (in_8(r + 1) << 8);
}
static void ace_out_8(struct ace_device *ace, int reg, u16 val)
{
void *r = ace->baseaddr + reg;
void __iomem *r = ace->baseaddr + reg;
out_8(r, val);
out_8(r + 1, val >> 8);
}
static void ace_datain_8(struct ace_device *ace)
{
void *r = ace->baseaddr + 0x40;
void __iomem *r = ace->baseaddr + 0x40;
u8 *dst = ace->data_ptr;
int i = ACE_FIFO_SIZE;
while (i--)
@ -243,7 +250,7 @@ static void ace_datain_8(struct ace_device *ace)
static void ace_dataout_8(struct ace_device *ace)
{
void *r = ace->baseaddr + 0x40;
void __iomem *r = ace->baseaddr + 0x40;
u8 *src = ace->data_ptr;
int i = ACE_FIFO_SIZE;
while (i--)
@ -931,9 +938,11 @@ static int __devinit ace_setup(struct ace_device *ace)
{
u16 version;
u16 val;
int rc;
dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
dev_dbg(ace->dev, "physaddr=0x%lx irq=%i\n", ace->physaddr, ace->irq);
spin_lock_init(&ace->lock);
init_completion(&ace->id_completion);
@ -944,15 +953,6 @@ static int __devinit ace_setup(struct ace_device *ace)
if (!ace->baseaddr)
goto err_ioremap;
if (ace->irq != NO_IRQ) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
ace->irq = NO_IRQ;
}
}
/*
* Initialize the state machine tasklet and stall timer
*/
@ -982,7 +982,7 @@ static int __devinit ace_setup(struct ace_device *ace)
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
/* set bus width */
if (ace->bus_width == 1) {
if (ace->bus_width == ACE_BUS_WIDTH_16) {
/* 0x0101 should work regardless of endianness */
ace_out_le16(ace, ACE_BUSMODE, 0x0101);
@ -1005,6 +1005,16 @@ static int __devinit ace_setup(struct ace_device *ace)
ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
if (ace->irq != NO_IRQ) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
ace->irq = NO_IRQ;
}
}
/* Enable interrupts */
val = ace_in(ace, ACE_CTRL);
val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
@ -1030,10 +1040,8 @@ static int __devinit ace_setup(struct ace_device *ace)
blk_cleanup_queue(ace->queue);
err_blk_initq:
iounmap(ace->baseaddr);
if (ace->irq != NO_IRQ)
free_irq(ace->irq, ace);
err_ioremap:
printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
dev_info(ace->dev, "xsysace: error initializing device at 0x%lx\n",
ace->physaddr);
return -ENOMEM;
}
@ -1056,98 +1064,222 @@ static void __devexit ace_teardown(struct ace_device *ace)
iounmap(ace->baseaddr);
}
static int __devinit
ace_alloc(struct device *dev, int id, unsigned long physaddr,
int irq, int bus_width)
{
struct ace_device *ace;
int rc;
dev_dbg(dev, "ace_alloc(%p)\n", dev);
if (!physaddr) {
rc = -ENODEV;
goto err_noreg;
}
/* Allocate and initialize the ace device structure */
ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
if (!ace) {
rc = -ENOMEM;
goto err_alloc;
}
ace->dev = dev;
ace->id = id;
ace->physaddr = physaddr;
ace->irq = irq;
ace->bus_width = bus_width;
/* Call the setup code */
rc = ace_setup(ace);
if (rc)
goto err_setup;
dev_set_drvdata(dev, ace);
return 0;
err_setup:
dev_set_drvdata(dev, NULL);
kfree(ace);
err_alloc:
err_noreg:
dev_err(dev, "could not initialize device, err=%i\n", rc);
return rc;
}
static void __devexit ace_free(struct device *dev)
{
struct ace_device *ace = dev_get_drvdata(dev);
dev_dbg(dev, "ace_free(%p)\n", dev);
if (ace) {
ace_teardown(ace);
dev_set_drvdata(dev, NULL);
kfree(ace);
}
}
/* ---------------------------------------------------------------------
* Platform Bus Support
*/
static int __devinit ace_probe(struct device *device)
static int __devinit ace_probe(struct platform_device *dev)
{
struct platform_device *dev = to_platform_device(device);
struct ace_device *ace;
unsigned long physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
int id = dev->id;
int irq = NO_IRQ;
int i;
dev_dbg(device, "ace_probe(%p)\n", device);
/*
* Allocate the ace device structure
*/
ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
if (!ace)
goto err_alloc;
ace->dev = device;
ace->id = dev->id;
ace->irq = NO_IRQ;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
for (i = 0; i < dev->num_resources; i++) {
if (dev->resource[i].flags & IORESOURCE_MEM)
ace->physaddr = dev->resource[i].start;
physaddr = dev->resource[i].start;
if (dev->resource[i].flags & IORESOURCE_IRQ)
ace->irq = dev->resource[i].start;
irq = dev->resource[i].start;
}
/* FIXME: Should get bus_width from the platform_device struct */
ace->bus_width = 1;
dev_set_drvdata(&dev->dev, ace);
/* Call the bus-independent setup code */
if (ace_setup(ace) != 0)
goto err_setup;
return 0;
err_setup:
dev_set_drvdata(&dev->dev, NULL);
kfree(ace);
err_alloc:
printk(KERN_ERR "xsysace: could not initialize device\n");
return -ENOMEM;
return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
}
/*
* Platform bus remove() method
*/
static int __devexit ace_remove(struct device *device)
static int __devexit ace_remove(struct platform_device *dev)
{
struct ace_device *ace = dev_get_drvdata(device);
dev_dbg(device, "ace_remove(%p)\n", device);
if (ace) {
ace_teardown(ace);
kfree(ace);
}
ace_free(&dev->dev);
return 0;
}
static struct device_driver ace_driver = {
.name = "xsysace",
.bus = &platform_bus_type,
static struct platform_driver ace_platform_driver = {
.probe = ace_probe,
.remove = __devexit_p(ace_remove),
.driver = {
.owner = THIS_MODULE,
.name = "xsysace",
},
};
/* ---------------------------------------------------------------------
* OF_Platform Bus Support
*/
#if defined(CONFIG_OF)
static int __devinit
ace_of_probe(struct of_device *op, const struct of_device_id *match)
{
struct resource res;
unsigned long physaddr;
const u32 *id;
int irq, bus_width, rc;
dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
/* device id */
id = of_get_property(op->node, "port-number", NULL);
/* physaddr */
rc = of_address_to_resource(op->node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
}
physaddr = res.start;
/* irq */
irq = irq_of_parse_and_map(op->node, 0);
/* bus width */
bus_width = ACE_BUS_WIDTH_16;
if (of_find_property(op->node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
/* Call the bus-independent setup code */
return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width);
}
static int __devexit ace_of_remove(struct of_device *op)
{
ace_free(&op->dev);
return 0;
}
/* Match table for of_platform binding */
static struct of_device_id __devinit ace_of_match[] = {
{ .compatible = "xilinx,xsysace", },
{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
static struct of_platform_driver ace_of_driver = {
.owner = THIS_MODULE,
.name = "xsysace",
.match_table = ace_of_match,
.probe = ace_of_probe,
.remove = __devexit_p(ace_of_remove),
.driver = {
.name = "xsysace",
},
};
/* Registration helpers to keep the number of #ifdefs to a minimum */
static inline int __init ace_of_register(void)
{
pr_debug("xsysace: registering OF binding\n");
return of_register_platform_driver(&ace_of_driver);
}
static inline void __exit ace_of_unregister(void)
{
of_unregister_platform_driver(&ace_of_driver);
}
#else /* CONFIG_OF */
/* CONFIG_OF not enabled; do-nothing helpers */
static inline int __init ace_of_register(void) { return 0; }
static inline void __exit ace_of_unregister(void) { }
#endif /* CONFIG_OF */
/* ---------------------------------------------------------------------
* Module init/exit routines
*/
static int __init ace_init(void)
{
int rc;
ace_major = register_blkdev(ace_major, "xsysace");
if (ace_major <= 0) {
printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
return ace_major;
rc = -ENOMEM;
goto err_blk;
}
pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
return driver_register(&ace_driver);
rc = ace_of_register();
if (rc)
goto err_of;
pr_debug("xsysace: registering platform binding\n");
rc = platform_driver_register(&ace_platform_driver);
if (rc)
goto err_plat;
pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
return 0;
err_plat:
ace_of_unregister();
err_of:
unregister_blkdev(ace_major, "xsysace");
err_blk:
printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
return rc;
}
static void __exit ace_exit(void)
{
pr_debug("Unregistering Xilinx SystemACE driver\n");
driver_unregister(&ace_driver);
platform_driver_unregister(&ace_platform_driver);
ace_of_unregister();
unregister_blkdev(ace_major, "xsysace");
}

View File

@ -606,13 +606,12 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
{
struct request *rq = pc->rq;
struct bio_vec *bvec;
struct bio *bio;
struct req_iterator iter;
unsigned long flags;
char *data;
int count, i, done = 0;
int count, done = 0;
rq_for_each_bio(bio, rq) {
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, rq, iter) {
if (!bcount)
break;
@ -626,7 +625,6 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
pc->b_count += count;
done += count;
}
}
idefloppy_do_end_request(drive, 1, done >> 9);
@ -639,14 +637,13 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, unsigned int bcount)
{
struct request *rq = pc->rq;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned long flags;
int count, i, done = 0;
int count, done = 0;
char *data;
rq_for_each_bio(bio, rq) {
bio_for_each_segment(bvec, bio, i) {
rq_for_each_segment(bvec, rq, iter) {
if (!bcount)
break;
@ -660,7 +657,6 @@ static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, un
pc->b_count += count;
done += count;
}
}
idefloppy_do_end_request(drive, 1, done >> 9);

View File

@ -489,7 +489,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
if (!atomic_dec_and_test(&io->pending))
return;
bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
bio_endio(io->base_bio, io->error);
mempool_free(io, cc->io_pool);
}
@ -509,25 +509,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
queue_work(_kcryptd_workqueue, &io->work);
}
static int crypt_endio(struct bio *clone, unsigned int done, int error)
static void crypt_endio(struct bio *clone, int error)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
/*
* free the processed pages, even if
* it's only a partially completed write
* free the processed pages
*/
if (!read_io)
crypt_free_buffer_pages(cc, clone, done);
/* keep going - not finished yet */
if (unlikely(clone->bi_size))
return 1;
if (!read_io)
if (!read_io) {
crypt_free_buffer_pages(cc, clone, clone->bi_size);
goto out;
}
if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
error = -EIO;
@ -537,12 +531,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
bio_put(clone);
io->post_process = 1;
kcryptd_queue_io(io);
return 0;
return;
out:
bio_put(clone);
dec_pending(io, error);
return error;
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)

View File

@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
bio_put(bio);
}
static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
if (bio->bi_size)
return 1;
/* We also need to look at the sense keys here to decide whether or not to
* switch to the next PG etc.
*
@ -109,15 +106,7 @@ static struct request *get_failover_req(struct emc_handler *h,
return NULL;
}
rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
rq->rq_disk = bdev->bd_contains->bd_disk;
/* bio backed don't set data */
rq->buffer = rq->data = NULL;
/* rq data_len used for pc cmd's request_bufflen */
rq->data_len = bio->bi_size;
blk_rq_append_bio(q, rq, bio);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
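The open-coded request setup removed here (assigning rq->bio and rq->biotail and filling rq->data_len by hand) is replaced by blk_rq_append_bio(), which performs the bookkeeping and merge checks in one place. A hedged sketch of building a bio-backed request with the helper (error handling elided):

	/* Hedged sketch: attach an already-built bio to a request via the
	 * helper instead of touching rq->bio/rq->biotail directly. */
	struct request *rq = blk_get_request(q, WRITE, GFP_NOIO);

	if (!rq || blk_rq_append_bio(q, rq, bio))
		return NULL;		/* caller is expected to free the bio */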

View File

@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
}
}
static int endio(struct bio *bio, unsigned int done, int error)
static void endio(struct bio *bio, int error)
{
struct io *io;
unsigned region;
/* keep going until we've finished */
if (bio->bi_size)
return 1;
if (error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
dec_count(io, region, error);
return 0;
}
/*-----------------------------------------------------------------

View File

@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
r = map_io(m, bio, mpio, 1);
if (r < 0)
bio_endio(bio, bio->bi_size, r);
bio_endio(bio, r);
else if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
else if (r == DM_MAPIO_REQUEUE)
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
bio = next;
}

View File

@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
break;
}
}
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
if (unlikely(ms->log_failure))
while ((bio = bio_list_pop(&sync)))
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);

View File

@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
bio = n;
}
}

View File

@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
break;
}
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;

View File

@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
blk_add_trace_bio(io->md->queue, io->bio,
BLK_TA_COMPLETE);
bio_endio(io->bio, io->bio->bi_size, io->error);
bio_endio(io->bio, io->error);
}
free_io(io->md, io);
}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
if (bio->bi_size)
return 1;
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
return 1;
return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
free_tio(md, tio);
return r;
}
static sector_t max_io_len(struct mapped_device *md,
@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_table(md);
if (!ci.map) {
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return;
}
@ -803,7 +799,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
* guarantee it is (or can be) handled by the targets correctly.
*/
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@ -820,13 +816,13 @@ static int dm_request(struct request_queue *q, struct bio *bio)
up_read(&md->io_lock);
if (bio_rw(bio) == READA) {
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
r = queue_io(md, bio);
if (r < 0) {
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
} else if (r == 0)

View File

@ -65,18 +65,16 @@
#include <linux/raid/md.h>
static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error)
static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
b->bi_size = bio->bi_size;
b->bi_sector = bio->bi_sector;
if (bio->bi_size == 0)
bio_put(bio);
clear_bit(BIO_UPTODATE, &b->bi_flags);
return (b->bi_end_io)(b, bytes_done, -EIO);
bio_io_error(b);
}
typedef struct faulty_conf {
@ -179,7 +177,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
return 0;
}

View File

@ -338,7 +338,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
sector_t block;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@ -358,7 +358,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bdevname(tmp_dev->rdev->bdev, b),
(unsigned long long)tmp_dev->size,
(unsigned long long)tmp_dev->offset);
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >

View File

@ -213,7 +213,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
static int md_fail_request (struct request_queue *q, struct bio *bio)
{
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
@ -384,12 +384,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
}
static int super_written(struct bio *bio, unsigned int bytes_done, int error)
static void super_written(struct bio *bio, int error)
{
mdk_rdev_t *rdev = bio->bi_private;
mddev_t *mddev = rdev->mddev;
if (bio->bi_size)
return 1;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
@ -401,16 +399,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
return 0;
}
static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
static void super_written_barrier(struct bio *bio, int error)
{
struct bio *bio2 = bio->bi_private;
mdk_rdev_t *rdev = bio2->bi_private;
mddev_t *mddev = rdev->mddev;
if (bio->bi_size)
return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
error == -EOPNOTSUPP) {
@ -424,11 +419,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int e
spin_unlock_irqrestore(&mddev->write_lock, flags);
wake_up(&mddev->sb_wait);
bio_put(bio);
return 0;
}
} else {
bio_put(bio2);
bio->bi_private = rdev;
return super_written(bio, bytes_done, error);
super_written(bio, error);
}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@ -489,13 +484,9 @@ void md_super_wait(mddev_t *mddev)
finish_wait(&mddev->sb_wait, &wq);
}
static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
static void bi_complete(struct bio *bio, int error)
{
if (bio->bi_size)
return 1;
complete((struct completion*)bio->bi_private);
return 0;
}
int sync_page_io(struct block_device *bdev, sector_t sector, int size,

View File

@ -82,21 +82,17 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
struct bio *bio = mp_bh->master_bio;
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
bio_endio(bio, bio->bi_size, err);
bio_endio(bio, err);
mempool_free(mp_bh, conf->pool);
}
static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
int error)
static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
if (bio->bi_size)
return 1;
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
else if (!bio_rw_ahead(bio)) {
@ -112,7 +108,6 @@ static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
} else
multipath_end_bh_io(mp_bh, error);
rdev_dec_pending(rdev, conf->mddev);
return 0;
}
static void unplug_slaves(mddev_t *mddev)
@ -155,7 +150,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@ -169,7 +164,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
mempool_free(mp_bh, conf->pool);
return 0;
}

View File

@ -420,7 +420,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@ -490,7 +490,7 @@ bad_map:
" or bigger than %dk %llu %d\n", chunk_size,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}

View File

@ -238,7 +238,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
(unsigned long long) bio->bi_sector +
(bio->bi_size >> 9) - 1);
bio_endio(bio, bio->bi_size,
bio_endio(bio,
test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
}
free_r1bio(r1_bio);
@ -255,16 +255,13 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
r1_bio->sector + (r1_bio->sectors);
}
static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror;
conf_t *conf = mddev_to_conf(r1_bio->mddev);
if (bio->bi_size)
return 1;
mirror = r1_bio->read_disk;
/*
* this branch is our 'one mirror IO has finished' event handler:
@ -301,10 +298,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
}
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
return 0;
}
static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@ -312,8 +308,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
conf_t *conf = mddev_to_conf(r1_bio->mddev);
struct bio *to_put = NULL;
if (bio->bi_size)
return 1;
for (mirror = 0; mirror < conf->raid_disks; mirror++)
if (r1_bio->bios[mirror] == bio)
@ -366,7 +360,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
(mbio->bi_size >> 9) - 1);
bio_endio(mbio, mbio->bi_size, 0);
bio_endio(mbio, 0);
}
}
}
@ -400,8 +394,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (to_put)
bio_put(to_put);
return 0;
}
@ -796,7 +788,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@ -1137,14 +1129,11 @@ abort:
}
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
static void end_sync_read(struct bio *bio, int error)
{
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int i;
if (bio->bi_size)
return 1;
for (i=r1_bio->mddev->raid_disks; i--; )
if (r1_bio->bios[i] == bio)
break;
@ -1160,10 +1149,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
return 0;
}
static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@ -1172,9 +1160,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
int i;
int mirror=0;
if (bio->bi_size)
return 1;
for (i = 0; i < conf->raid_disks; i++)
if (r1_bio->bios[i] == bio) {
mirror = i;
@ -1200,7 +1185,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
md_done_sync(mddev, r1_bio->sectors, uptodate);
put_buf(r1_bio);
}
return 0;
}
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)

View File

@ -227,7 +227,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
{
struct bio *bio = r10_bio->master_bio;
bio_endio(bio, bio->bi_size,
bio_endio(bio,
test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
free_r10bio(r10_bio);
}
@ -243,15 +243,13 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
r10_bio->devs[slot].addr + (r10_bio->sectors);
}
static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
if (bio->bi_size)
return 1;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
@ -284,19 +282,15 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
return 0;
}
static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
if (bio->bi_size)
return 1;
for (slot = 0; slot < conf->copies; slot++)
if (r10_bio->devs[slot].bio == bio)
break;
@ -339,7 +333,6 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
return 0;
}
@ -787,7 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
unsigned long flags;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@ -819,7 +812,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
" or bigger than %dk %llu %d\n", chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}
@ -1155,15 +1148,12 @@ abort:
}
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
static void end_sync_read(struct bio *bio, int error)
{
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r10_bio->mddev);
int i,d;
if (bio->bi_size)
return 1;
for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
@ -1192,10 +1182,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
reschedule_retry(r10_bio);
}
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
return 0;
}
static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
@ -1203,9 +1192,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
conf_t *conf = mddev_to_conf(mddev);
int i,d;
if (bio->bi_size)
return 1;
for (i = 0; i < conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
@ -1228,7 +1214,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
}
}
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
return 0;
}
/*
@ -1374,7 +1359,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
if (test_bit(R10BIO_Uptodate, &r10_bio->state))
generic_make_request(wbio);
else
bio_endio(wbio, wbio->bi_size, -EIO);
bio_endio(wbio, -EIO);
}

View File

@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
{
struct bio *bi = return_bi;
while (bi) {
int bytes = bi->bi_size;
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
bi->bi_end_io(bi, bytes,
bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
bi = return_bi;
@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
return pending;
}
static int
raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error);
static int
raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error);
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh)
{
@ -1110,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
conf->slab_cache = NULL;
}
static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
int error)
static void raid5_end_read_request(struct bio * bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
@ -1120,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
if (bi->bi_size)
return 1;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@ -1132,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
return 0;
return;
}
if (uptodate) {
@ -1185,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
return 0;
}
static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
int error)
static void raid5_end_write_request (struct bio *bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
if (bi->bi_size)
return 1;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
break;
@ -1208,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
return 0;
return;
}
if (!uptodate)
@ -1219,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
return 0;
}
@ -3340,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
* first).
* If the read failed..
*/
static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
static void raid5_align_endio(struct bio *bi, int error)
{
struct bio* raid_bi = bi->bi_private;
mddev_t *mddev;
@ -3348,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
mdk_rdev_t *rdev;
if (bi->bi_size)
return 1;
bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@ -3360,17 +3348,16 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
bio_endio(raid_bi, bytes, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return 0;
return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
return 0;
}
static int bio_fits_rdev(struct bio *bi)
@ -3476,7 +3463,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
int remaining;
if (unlikely(bio_barrier(bi))) {
bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
bio_endio(bi, -EOPNOTSUPP);
return 0;
}
@ -3592,12 +3579,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
remaining = --bi->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
int bytes = bi->bi_size;
if ( rw == WRITE )
md_write_end(mddev);
bi->bi_size = 0;
bi->bi_end_io(bi, bytes,
bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
}
@ -3875,10 +3861,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
remaining = --raid_bio->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
int bytes = raid_bio->bi_size;
raid_bio->bi_size = 0;
raid_bio->bi_end_io(raid_bio, bytes,
raid_bio->bi_end_io(raid_bio,
test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
? 0 : -EIO);
}

View File

@ -472,14 +472,13 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
int i;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
@ -493,14 +492,12 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (device->s2b_shift + 9);
}
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
@ -516,8 +513,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
@ -529,7 +525,6 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
recid++;
}
}
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
if (req->cmd_flags & REQ_FAILFAST)

View File

@ -1176,7 +1176,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int blksize, blk_per_trk, off;
@ -1185,7 +1185,6 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
sector_t first_trk, last_trk;
unsigned int first_offs, last_offs;
unsigned char cmd, rcmd;
int i;
private = (struct dasd_eckd_private *) device->private;
if (rq_data_dir(req) == READ)
@ -1206,19 +1205,16 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv->bv_page),
bv->bv_len))
if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
cidaw += bv->bv_len >> (device->s2b_shift + 9);
#endif
}
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
@ -1257,7 +1253,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
last_rec - recid + 1, cmd, device, blksize);
}
rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
@ -1328,12 +1324,12 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_eckd_private *private;
struct ccw1 *ccw;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
int i, status;
int status;
if (!dasd_page_cache)
goto out;
@ -1346,7 +1342,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */

View File

@ -234,14 +234,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
int i;
private = (struct dasd_fba_private *) device->private;
if (rq_data_dir(req) == READ) {
@ -257,19 +256,16 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv->bv_page),
bv->bv_len))
if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
cidaw += bv->bv_len / blksize;
#endif
}
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
@ -304,7 +300,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
@ -359,11 +355,11 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_fba_private *private;
struct ccw1 *ccw;
struct bio *bio;
struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, off;
int i, status;
int status;
if (!dasd_page_cache)
goto out;
@ -374,7 +370,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */

View File

@ -674,10 +674,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
bytes_done += bvec->bv_len;
}
bio_endio(bio, bytes_done, 0);
bio_endio(bio, 0);
return 0;
fail:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}

View File

@ -230,12 +230,10 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
}
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
bytes = bio->bi_size;
bio->bi_size = 0;
bio->bi_end_io(bio, bytes, 0);
bio_endio(bio, 0);
return 0;
fail:
bio_io_error(bio, bio->bi_size);
bio_io_error(bio);
return 0;
}

View File

@ -1134,21 +1134,18 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
int count = 0, i;
int count = 0;
unsigned off;
char *dst;
struct bio_vec *bv;
struct bio *bio;
struct req_iterator iter;
struct tape_34xx_block_id * start_block;
DBF_EVENT(6, "xBREDid:");
/* Count the number of blocks for the request. */
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter)
count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
}
}
/* Allocate the ccw request. */
request = tape_alloc_request(3+count+1, 8);
@ -1175,11 +1172,9 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = kmap(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len;
off += TAPEBLOCK_HSEC_SIZE) {
for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
ccw->flags = CCW_FLAG_CC;
ccw->cmd_code = READ_FORWARD;
ccw->count = TAPEBLOCK_HSEC_SIZE;
@ -1188,7 +1183,6 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
dst += TAPEBLOCK_HSEC_SIZE;
}
}
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");

View File

@ -623,21 +623,19 @@ tape_3590_bread(struct tape_device *device, struct request *req)
{
struct tape_request *request;
struct ccw1 *ccw;
int count = 0, start_block, i;
int count = 0, start_block;
unsigned off;
char *dst;
struct bio_vec *bv;
struct bio *bio;
struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter)
count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
}
}
request = tape_alloc_request(2 + count + 1, 4);
if (IS_ERR(request))
return request;
@ -653,11 +651,9 @@ tape_3590_bread(struct tape_device *device, struct request *req)
*/
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
rq_for_each_bio(bio, req) {
bio_for_each_segment(bv, bio, i) {
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len;
off += TAPEBLOCK_HSEC_SIZE) {
for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
ccw->flags = CCW_FLAG_CC;
ccw->cmd_code = READ_FORWARD;
ccw->count = TAPEBLOCK_HSEC_SIZE;
@ -668,7 +664,6 @@ tape_3590_bread(struct tape_device *device, struct request *req)
if (off > bv->bv_len)
BUG();
}
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");
return request;

View File

@ -263,25 +263,12 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
bio->bi_rw |= (1 << BIO_RW);
blk_queue_bounce(q, &bio);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
return blk_rq_append_bio(q, rq, bio);
}
return 0;
}
static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
static void scsi_bi_endio(struct bio *bio, int error)
{
if (bio->bi_size)
return 1;
bio_put(bio);
return 0;
}
/**
@ -337,7 +324,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
if (bio->bi_vcnt >= nr_vecs) {
err = scsi_merge_bio(rq, bio);
if (err) {
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
goto free_bios;
}
bio = NULL;
@ -359,7 +346,7 @@ free_bios:
/*
* call endio instead of bio_put incase it was bounced
*/
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
}
return err;

View File

@ -798,13 +798,9 @@ void bio_unmap_user(struct bio *bio)
bio_put(bio);
}
static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
static void bio_map_kern_endio(struct bio *bio, int err)
{
if (bio->bi_size)
return 1;
bio_put(bio);
return 0;
}
@ -1002,34 +998,26 @@ void bio_check_pages_dirty(struct bio *bio)
/**
* bio_endio - end I/O on a bio
* @bio: bio
* @bytes_done: number of bytes completed
* @error: error, if any
*
* Description:
* bio_endio() will end I/O on @bytes_done number of bytes. This may be
* just a partial part of the bio, or it may be the whole bio. bio_endio()
* is the preferred way to end I/O on a bio, it takes care of decrementing
* bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
* and one of the established -Exxxx (-EIO, for instance) error values in
* case something went wrong. Noone should call bi_end_io() directly on
* a bio unless they own it and thus know that it has an end_io function.
* bio_endio() will end I/O on the whole bio. bio_endio() is the
* preferred way to end I/O on a bio; it takes care of clearing
* BIO_UPTODATE on error. @error is 0 on success, and one of the
* established -Exxxx (-EIO, for instance) error values in case
* something went wrong. No one should call bi_end_io() directly on a
* bio unless they own it and thus know that it has an end_io
* function.
**/
void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
void bio_endio(struct bio *bio, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (unlikely(bytes_done > bio->bi_size)) {
printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
bytes_done, bio->bi_size);
bytes_done = bio->bi_size;
}
bio->bi_size -= bytes_done;
bio->bi_sector += (bytes_done >> 9);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
if (bio->bi_end_io)
bio->bi_end_io(bio, bytes_done, error);
bio->bi_end_io(bio, error);
}
void bio_pair_release(struct bio_pair *bp)
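
This hunk, and the matching callback conversions throughout the series, replaces the old three-argument bi_end_io signature with a void, two-argument one: the handler is called exactly once for the whole bio, so the bytes_done/partial-completion bookkeeping disappears. As a rough sketch of the new shape only (struct my_request and my_end_io are hypothetical names, not part of this commit; assumes <linux/bio.h> and <linux/completion.h>):

struct my_request {
        struct completion done;
        int error;
};

static void my_end_io(struct bio *bio, int error)
{
        struct my_request *mreq = bio->bi_private;

        /* runs once, for the whole bio; no bytes_done accounting needed */
        if (!error && !test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        mreq->error = error;
        complete(&mreq->done);
        bio_put(bio);
}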
@ -1037,37 +1025,29 @@ void bio_pair_release(struct bio_pair *bp)
if (atomic_dec_and_test(&bp->cnt)) {
struct bio *master = bp->bio1.bi_private;
bio_endio(master, master->bi_size, bp->error);
bio_endio(master, bp->error);
mempool_free(bp, bp->bio2.bi_private);
}
}
static int bio_pair_end_1(struct bio * bi, unsigned int done, int err)
static void bio_pair_end_1(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
if (err)
bp->error = err;
if (bi->bi_size)
return 1;
bio_pair_release(bp);
return 0;
}
static int bio_pair_end_2(struct bio * bi, unsigned int done, int err)
static void bio_pair_end_2(struct bio *bi, int err)
{
struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
if (err)
bp->error = err;
if (bi->bi_size)
return 1;
bio_pair_release(bp);
return 0;
}
/*


@ -172,7 +172,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
}
#if 0
static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
static void blk_end_aio(struct bio *bio, int error)
{
struct kiocb *iocb = bio->bi_private;
atomic_t *bio_count = &iocb->ki_bio_count;


@ -2634,13 +2634,10 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
return tmp.b_blocknr;
}
static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
struct buffer_head *bh = bio->bi_private;
if (bio->bi_size)
return 1;
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
set_bit(BH_Eopnotsupp, &bh->b_state);
@ -2648,7 +2645,6 @@ static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
return 0;
}
int submit_bh(int rw, struct buffer_head * bh)


@ -21,7 +21,6 @@
#include <linux/if.h>
#include <linux/if_bridge.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/raid/md.h>
#include <linux/kd.h>
#include <linux/dirent.h>
@ -33,12 +32,10 @@
#include <linux/vt.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fd.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/cdrom.h>
#include <linux/auto_fs.h>
#include <linux/auto_fs4.h>
#include <linux/tty.h>
@ -48,7 +45,6 @@
#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/smb_fs.h>
#include <linux/blkpg.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rtc.h>
@ -62,7 +58,6 @@
#include <linux/i2c-dev.h>
#include <linux/wireless.h>
#include <linux/atalk.h>
#include <linux/blktrace_api.h>
#include <linux/loop.h>
#include <net/bluetooth/bluetooth.h>
@ -668,53 +663,6 @@ out:
#endif
#ifdef CONFIG_BLOCK
struct hd_geometry32 {
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
u32 start;
};
static int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
struct hd_geometry geo;
struct hd_geometry32 __user *ugeo;
int err;
set_fs (KERNEL_DS);
err = sys_ioctl(fd, HDIO_GETGEO, (unsigned long)&geo);
set_fs (old_fs);
ugeo = compat_ptr(arg);
if (!err) {
err = copy_to_user (ugeo, &geo, 4);
err |= __put_user (geo.start, &ugeo->start);
if (err)
err = -EFAULT;
}
return err;
}
static int hdio_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
unsigned long kval;
unsigned int __user *uvp;
int error;
set_fs(KERNEL_DS);
error = sys_ioctl(fd, cmd, (long)&kval);
set_fs(old_fs);
if(error == 0) {
uvp = compat_ptr(arg);
if(put_user(kval, uvp))
error = -EFAULT;
}
return error;
}
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
compat_int_t dxfer_direction; /* [i] data transfer direction */
@ -1089,108 +1037,6 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
return err ? -EFAULT: 0;
}
struct cdrom_read_audio32 {
union cdrom_addr addr;
u8 addr_format;
compat_int_t nframes;
compat_caddr_t buf;
};
struct cdrom_generic_command32 {
unsigned char cmd[CDROM_PACKET_SIZE];
compat_caddr_t buffer;
compat_uint_t buflen;
compat_int_t stat;
compat_caddr_t sense;
unsigned char data_direction;
compat_int_t quiet;
compat_int_t timeout;
compat_caddr_t reserved[1];
};
static int cdrom_do_read_audio(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct cdrom_read_audio __user *cdread_audio;
struct cdrom_read_audio32 __user *cdread_audio32;
__u32 data;
void __user *datap;
cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
cdread_audio32 = compat_ptr(arg);
if (copy_in_user(&cdread_audio->addr,
&cdread_audio32->addr,
(sizeof(*cdread_audio32) -
sizeof(compat_caddr_t))))
return -EFAULT;
if (get_user(data, &cdread_audio32->buf))
return -EFAULT;
datap = compat_ptr(data);
if (put_user(datap, &cdread_audio->buf))
return -EFAULT;
return sys_ioctl(fd, cmd, (unsigned long) cdread_audio);
}
static int cdrom_do_generic_command(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct cdrom_generic_command __user *cgc;
struct cdrom_generic_command32 __user *cgc32;
u32 data;
unsigned char dir;
int itmp;
cgc = compat_alloc_user_space(sizeof(*cgc));
cgc32 = compat_ptr(arg);
if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
get_user(data, &cgc32->buffer) ||
put_user(compat_ptr(data), &cgc->buffer) ||
copy_in_user(&cgc->buflen, &cgc32->buflen,
(sizeof(unsigned int) + sizeof(int))) ||
get_user(data, &cgc32->sense) ||
put_user(compat_ptr(data), &cgc->sense) ||
get_user(dir, &cgc32->data_direction) ||
put_user(dir, &cgc->data_direction) ||
get_user(itmp, &cgc32->quiet) ||
put_user(itmp, &cgc->quiet) ||
get_user(itmp, &cgc32->timeout) ||
put_user(itmp, &cgc->timeout) ||
get_user(data, &cgc32->reserved[0]) ||
put_user(compat_ptr(data), &cgc->reserved[0]))
return -EFAULT;
return sys_ioctl(fd, cmd, (unsigned long) cgc);
}
static int cdrom_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
int err;
switch(cmd) {
case CDROMREADAUDIO:
err = cdrom_do_read_audio(fd, cmd, arg);
break;
case CDROM_SEND_PACKET:
err = cdrom_do_generic_command(fd, cmd, arg);
break;
default:
do {
static int count;
if (++count <= 20)
printk("cdrom_ioctl: Unknown cmd fd(%d) "
"cmd(%08x) arg(%08x)\n",
(int)fd, (unsigned int)cmd, (unsigned int)arg);
} while(0);
err = -EINVAL;
break;
};
return err;
}
#endif /* CONFIG_BLOCK */
#ifdef CONFIG_VT
@ -1536,71 +1382,11 @@ ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
#ifdef CONFIG_BLOCK
static int broken_blkgetsize(unsigned int fd, unsigned int cmd, unsigned long arg)
{
/* The mkswap binary hard codes it to Intel value :-((( */
return w_long(fd, BLKGETSIZE, arg);
}
struct blkpg_ioctl_arg32 {
compat_int_t op;
compat_int_t flags;
compat_int_t datalen;
compat_caddr_t data;
};
static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
struct blkpg_ioctl_arg32 __user *ua32 = compat_ptr(arg);
struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
compat_caddr_t udata;
compat_int_t n;
int err;
err = get_user(n, &ua32->op);
err |= put_user(n, &a->op);
err |= get_user(n, &ua32->flags);
err |= put_user(n, &a->flags);
err |= get_user(n, &ua32->datalen);
err |= put_user(n, &a->datalen);
err |= get_user(udata, &ua32->data);
err |= put_user(compat_ptr(udata), &a->data);
if (err)
return err;
return sys_ioctl(fd, cmd, (unsigned long)a);
}
#endif
static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
}
#ifdef CONFIG_BLOCK
/* Fix sizeof(sizeof()) breakage */
#define BLKBSZGET_32 _IOR(0x12,112,int)
#define BLKBSZSET_32 _IOW(0x12,113,int)
#define BLKGETSIZE64_32 _IOR(0x12,114,int)
static int do_blkbszget(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return sys_ioctl(fd, BLKBSZGET, (unsigned long)compat_ptr(arg));
}
static int do_blkbszset(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return sys_ioctl(fd, BLKBSZSET, (unsigned long)compat_ptr(arg));
}
static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
return sys_ioctl(fd, BLKGETSIZE64, (unsigned long)compat_ptr(arg));
}
#endif
/* Bluetooth ioctls */
#define HCIUARTSETPROTO _IOW('U', 200, int)
#define HCIUARTGETPROTO _IOR('U', 201, int)
@ -1620,333 +1406,6 @@ static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
#ifdef CONFIG_BLOCK
struct floppy_struct32 {
compat_uint_t size;
compat_uint_t sect;
compat_uint_t head;
compat_uint_t track;
compat_uint_t stretch;
unsigned char gap;
unsigned char rate;
unsigned char spec1;
unsigned char fmt_gap;
const compat_caddr_t name;
};
struct floppy_drive_params32 {
char cmos;
compat_ulong_t max_dtr;
compat_ulong_t hlt;
compat_ulong_t hut;
compat_ulong_t srt;
compat_ulong_t spinup;
compat_ulong_t spindown;
unsigned char spindown_offset;
unsigned char select_delay;
unsigned char rps;
unsigned char tracks;
compat_ulong_t timeout;
unsigned char interleave_sect;
struct floppy_max_errors max_errors;
char flags;
char read_track;
short autodetect[8];
compat_int_t checkfreq;
compat_int_t native_format;
};
struct floppy_drive_struct32 {
signed char flags;
compat_ulong_t spinup_date;
compat_ulong_t select_date;
compat_ulong_t first_read_date;
short probed_format;
short track;
short maxblock;
short maxtrack;
compat_int_t generation;
compat_int_t keep_data;
compat_int_t fd_ref;
compat_int_t fd_device;
compat_int_t last_checked;
compat_caddr_t dmabuf;
compat_int_t bufblocks;
};
struct floppy_fdc_state32 {
compat_int_t spec1;
compat_int_t spec2;
compat_int_t dtr;
unsigned char version;
unsigned char dor;
compat_ulong_t address;
unsigned int rawcmd:2;
unsigned int reset:1;
unsigned int need_configure:1;
unsigned int perp_mode:2;
unsigned int has_fifo:1;
unsigned int driver_version;
unsigned char track[4];
};
struct floppy_write_errors32 {
unsigned int write_errors;
compat_ulong_t first_error_sector;
compat_int_t first_error_generation;
compat_ulong_t last_error_sector;
compat_int_t last_error_generation;
compat_uint_t badness;
};
#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
#define FDGETPRM32 _IOR(2, 0x04, struct floppy_struct32)
#define FDSETDRVPRM32 _IOW(2, 0x90, struct floppy_drive_params32)
#define FDGETDRVPRM32 _IOR(2, 0x11, struct floppy_drive_params32)
#define FDGETDRVSTAT32 _IOR(2, 0x12, struct floppy_drive_struct32)
#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct floppy_drive_struct32)
#define FDGETFDCSTAT32 _IOR(2, 0x15, struct floppy_fdc_state32)
#define FDWERRORGET32 _IOR(2, 0x17, struct floppy_write_errors32)
static struct {
unsigned int cmd32;
unsigned int cmd;
} fd_ioctl_trans_table[] = {
{ FDSETPRM32, FDSETPRM },
{ FDDEFPRM32, FDDEFPRM },
{ FDGETPRM32, FDGETPRM },
{ FDSETDRVPRM32, FDSETDRVPRM },
{ FDGETDRVPRM32, FDGETDRVPRM },
{ FDGETDRVSTAT32, FDGETDRVSTAT },
{ FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
{ FDGETFDCSTAT32, FDGETFDCSTAT },
{ FDWERRORGET32, FDWERRORGET }
};
#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
void *karg = NULL;
unsigned int kcmd = 0;
int i, err;
for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
if (cmd == fd_ioctl_trans_table[i].cmd32) {
kcmd = fd_ioctl_trans_table[i].cmd;
break;
}
if (!kcmd)
return -EINVAL;
switch (cmd) {
case FDSETPRM32:
case FDDEFPRM32:
case FDGETPRM32:
{
compat_uptr_t name;
struct floppy_struct32 __user *uf;
struct floppy_struct *f;
uf = compat_ptr(arg);
f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
if (!karg)
return -ENOMEM;
if (cmd == FDGETPRM32)
break;
err = __get_user(f->size, &uf->size);
err |= __get_user(f->sect, &uf->sect);
err |= __get_user(f->head, &uf->head);
err |= __get_user(f->track, &uf->track);
err |= __get_user(f->stretch, &uf->stretch);
err |= __get_user(f->gap, &uf->gap);
err |= __get_user(f->rate, &uf->rate);
err |= __get_user(f->spec1, &uf->spec1);
err |= __get_user(f->fmt_gap, &uf->fmt_gap);
err |= __get_user(name, &uf->name);
f->name = compat_ptr(name);
if (err) {
err = -EFAULT;
goto out;
}
break;
}
case FDSETDRVPRM32:
case FDGETDRVPRM32:
{
struct floppy_drive_params32 __user *uf;
struct floppy_drive_params *f;
uf = compat_ptr(arg);
f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
if (!karg)
return -ENOMEM;
if (cmd == FDGETDRVPRM32)
break;
err = __get_user(f->cmos, &uf->cmos);
err |= __get_user(f->max_dtr, &uf->max_dtr);
err |= __get_user(f->hlt, &uf->hlt);
err |= __get_user(f->hut, &uf->hut);
err |= __get_user(f->srt, &uf->srt);
err |= __get_user(f->spinup, &uf->spinup);
err |= __get_user(f->spindown, &uf->spindown);
err |= __get_user(f->spindown_offset, &uf->spindown_offset);
err |= __get_user(f->select_delay, &uf->select_delay);
err |= __get_user(f->rps, &uf->rps);
err |= __get_user(f->tracks, &uf->tracks);
err |= __get_user(f->timeout, &uf->timeout);
err |= __get_user(f->interleave_sect, &uf->interleave_sect);
err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
err |= __get_user(f->flags, &uf->flags);
err |= __get_user(f->read_track, &uf->read_track);
err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
err |= __get_user(f->checkfreq, &uf->checkfreq);
err |= __get_user(f->native_format, &uf->native_format);
if (err) {
err = -EFAULT;
goto out;
}
break;
}
case FDGETDRVSTAT32:
case FDPOLLDRVSTAT32:
karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
if (!karg)
return -ENOMEM;
break;
case FDGETFDCSTAT32:
karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
if (!karg)
return -ENOMEM;
break;
case FDWERRORGET32:
karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
if (!karg)
return -ENOMEM;
break;
default:
return -EINVAL;
}
set_fs (KERNEL_DS);
err = sys_ioctl (fd, kcmd, (unsigned long)karg);
set_fs (old_fs);
if (err)
goto out;
switch (cmd) {
case FDGETPRM32:
{
struct floppy_struct *f = karg;
struct floppy_struct32 __user *uf = compat_ptr(arg);
err = __put_user(f->size, &uf->size);
err |= __put_user(f->sect, &uf->sect);
err |= __put_user(f->head, &uf->head);
err |= __put_user(f->track, &uf->track);
err |= __put_user(f->stretch, &uf->stretch);
err |= __put_user(f->gap, &uf->gap);
err |= __put_user(f->rate, &uf->rate);
err |= __put_user(f->spec1, &uf->spec1);
err |= __put_user(f->fmt_gap, &uf->fmt_gap);
err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
break;
}
case FDGETDRVPRM32:
{
struct floppy_drive_params32 __user *uf;
struct floppy_drive_params *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->cmos, &uf->cmos);
err |= __put_user(f->max_dtr, &uf->max_dtr);
err |= __put_user(f->hlt, &uf->hlt);
err |= __put_user(f->hut, &uf->hut);
err |= __put_user(f->srt, &uf->srt);
err |= __put_user(f->spinup, &uf->spinup);
err |= __put_user(f->spindown, &uf->spindown);
err |= __put_user(f->spindown_offset, &uf->spindown_offset);
err |= __put_user(f->select_delay, &uf->select_delay);
err |= __put_user(f->rps, &uf->rps);
err |= __put_user(f->tracks, &uf->tracks);
err |= __put_user(f->timeout, &uf->timeout);
err |= __put_user(f->interleave_sect, &uf->interleave_sect);
err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
err |= __put_user(f->flags, &uf->flags);
err |= __put_user(f->read_track, &uf->read_track);
err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
err |= __put_user(f->checkfreq, &uf->checkfreq);
err |= __put_user(f->native_format, &uf->native_format);
break;
}
case FDGETDRVSTAT32:
case FDPOLLDRVSTAT32:
{
struct floppy_drive_struct32 __user *uf;
struct floppy_drive_struct *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->flags, &uf->flags);
err |= __put_user(f->spinup_date, &uf->spinup_date);
err |= __put_user(f->select_date, &uf->select_date);
err |= __put_user(f->first_read_date, &uf->first_read_date);
err |= __put_user(f->probed_format, &uf->probed_format);
err |= __put_user(f->track, &uf->track);
err |= __put_user(f->maxblock, &uf->maxblock);
err |= __put_user(f->maxtrack, &uf->maxtrack);
err |= __put_user(f->generation, &uf->generation);
err |= __put_user(f->keep_data, &uf->keep_data);
err |= __put_user(f->fd_ref, &uf->fd_ref);
err |= __put_user(f->fd_device, &uf->fd_device);
err |= __put_user(f->last_checked, &uf->last_checked);
err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
break;
}
case FDGETFDCSTAT32:
{
struct floppy_fdc_state32 __user *uf;
struct floppy_fdc_state *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->spec1, &uf->spec1);
err |= __put_user(f->spec2, &uf->spec2);
err |= __put_user(f->dtr, &uf->dtr);
err |= __put_user(f->version, &uf->version);
err |= __put_user(f->dor, &uf->dor);
err |= __put_user(f->address, &uf->address);
err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
(char *)&f->address + sizeof(f->address), sizeof(int));
err |= __put_user(f->driver_version, &uf->driver_version);
err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
break;
}
case FDWERRORGET32:
{
struct floppy_write_errors32 __user *uf;
struct floppy_write_errors *f = karg;
uf = compat_ptr(arg);
err = __put_user(f->write_errors, &uf->write_errors);
err |= __put_user(f->first_error_sector, &uf->first_error_sector);
err |= __put_user(f->first_error_generation, &uf->first_error_generation);
err |= __put_user(f->last_error_sector, &uf->last_error_sector);
err |= __put_user(f->last_error_generation, &uf->last_error_generation);
err |= __put_user(f->badness, &uf->badness);
break;
}
default:
break;
}
if (err)
err = -EFAULT;
out:
kfree(karg);
return err;
}
#endif
struct mtd_oob_buf32 {
u_int32_t start;
u_int32_t length;
@ -2506,60 +1965,6 @@ COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
/* 0x00 */
COMPATIBLE_IOCTL(FIBMAP)
COMPATIBLE_IOCTL(FIGETBSZ)
/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
* Some need translations, these do not.
*/
COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
COMPATIBLE_IOCTL(HDIO_DRIVE_TASK)
COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
ULONG_IOCTL(HDIO_SET_MULTCOUNT)
ULONG_IOCTL(HDIO_SET_UNMASKINTR)
ULONG_IOCTL(HDIO_SET_KEEPSETTINGS)
ULONG_IOCTL(HDIO_SET_32BIT)
ULONG_IOCTL(HDIO_SET_NOWERR)
ULONG_IOCTL(HDIO_SET_DMA)
ULONG_IOCTL(HDIO_SET_PIO_MODE)
ULONG_IOCTL(HDIO_SET_NICE)
ULONG_IOCTL(HDIO_SET_WCACHE)
ULONG_IOCTL(HDIO_SET_ACOUSTIC)
ULONG_IOCTL(HDIO_SET_BUSSTATE)
ULONG_IOCTL(HDIO_SET_ADDRESS)
COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
COMPATIBLE_IOCTL(0x330)
/* 0x02 -- Floppy ioctls */
COMPATIBLE_IOCTL(FDMSGON)
COMPATIBLE_IOCTL(FDMSGOFF)
COMPATIBLE_IOCTL(FDSETEMSGTRESH)
COMPATIBLE_IOCTL(FDFLUSH)
COMPATIBLE_IOCTL(FDWERRORCLR)
COMPATIBLE_IOCTL(FDSETMAXERRS)
COMPATIBLE_IOCTL(FDGETMAXERRS)
COMPATIBLE_IOCTL(FDGETDRVTYP)
COMPATIBLE_IOCTL(FDEJECT)
COMPATIBLE_IOCTL(FDCLRPRM)
COMPATIBLE_IOCTL(FDFMTBEG)
COMPATIBLE_IOCTL(FDFMTEND)
COMPATIBLE_IOCTL(FDRESET)
COMPATIBLE_IOCTL(FDTWADDLE)
COMPATIBLE_IOCTL(FDFMTTRK)
COMPATIBLE_IOCTL(FDRAWCMD)
/* 0x12 */
#ifdef CONFIG_BLOCK
COMPATIBLE_IOCTL(BLKRASET)
COMPATIBLE_IOCTL(BLKROSET)
COMPATIBLE_IOCTL(BLKROGET)
COMPATIBLE_IOCTL(BLKRRPART)
COMPATIBLE_IOCTL(BLKFLSBUF)
COMPATIBLE_IOCTL(BLKSECTSET)
COMPATIBLE_IOCTL(BLKSSZGET)
COMPATIBLE_IOCTL(BLKTRACESTART)
COMPATIBLE_IOCTL(BLKTRACESTOP)
COMPATIBLE_IOCTL(BLKTRACESETUP)
COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
ULONG_IOCTL(BLKRASET)
ULONG_IOCTL(BLKFRASET)
#endif
/* RAID */
COMPATIBLE_IOCTL(RAID_VERSION)
COMPATIBLE_IOCTL(GET_ARRAY_INFO)
@ -2807,50 +2212,6 @@ COMPATIBLE_IOCTL(PPGETMODE)
COMPATIBLE_IOCTL(PPGETPHASE)
COMPATIBLE_IOCTL(PPGETFLAGS)
COMPATIBLE_IOCTL(PPSETFLAGS)
/* CDROM stuff */
COMPATIBLE_IOCTL(CDROMPAUSE)
COMPATIBLE_IOCTL(CDROMRESUME)
COMPATIBLE_IOCTL(CDROMPLAYMSF)
COMPATIBLE_IOCTL(CDROMPLAYTRKIND)
COMPATIBLE_IOCTL(CDROMREADTOCHDR)
COMPATIBLE_IOCTL(CDROMREADTOCENTRY)
COMPATIBLE_IOCTL(CDROMSTOP)
COMPATIBLE_IOCTL(CDROMSTART)
COMPATIBLE_IOCTL(CDROMEJECT)
COMPATIBLE_IOCTL(CDROMVOLCTRL)
COMPATIBLE_IOCTL(CDROMSUBCHNL)
ULONG_IOCTL(CDROMEJECT_SW)
COMPATIBLE_IOCTL(CDROMMULTISESSION)
COMPATIBLE_IOCTL(CDROM_GET_MCN)
COMPATIBLE_IOCTL(CDROMRESET)
COMPATIBLE_IOCTL(CDROMVOLREAD)
COMPATIBLE_IOCTL(CDROMSEEK)
COMPATIBLE_IOCTL(CDROMPLAYBLK)
COMPATIBLE_IOCTL(CDROMCLOSETRAY)
ULONG_IOCTL(CDROM_SET_OPTIONS)
ULONG_IOCTL(CDROM_CLEAR_OPTIONS)
ULONG_IOCTL(CDROM_SELECT_SPEED)
ULONG_IOCTL(CDROM_SELECT_DISC)
ULONG_IOCTL(CDROM_MEDIA_CHANGED)
ULONG_IOCTL(CDROM_DRIVE_STATUS)
COMPATIBLE_IOCTL(CDROM_DISC_STATUS)
COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS)
ULONG_IOCTL(CDROM_LOCKDOOR)
ULONG_IOCTL(CDROM_DEBUG)
COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY)
/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
* not take a struct cdrom_read, instead they take a struct cdrom_msf
* which is compatible.
*/
COMPATIBLE_IOCTL(CDROMREADMODE2)
COMPATIBLE_IOCTL(CDROMREADMODE1)
COMPATIBLE_IOCTL(CDROMREADRAW)
COMPATIBLE_IOCTL(CDROMREADCOOKED)
COMPATIBLE_IOCTL(CDROMREADALL)
/* DVD ioctls */
COMPATIBLE_IOCTL(DVD_READ_STRUCT)
COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
COMPATIBLE_IOCTL(DVD_AUTH)
/* pktcdvd */
COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
/* Big A */
@ -3336,33 +2697,6 @@ HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
#endif
#ifdef CONFIG_BLOCK
HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
HANDLE_IOCTL(BLKRAGET, w_long)
HANDLE_IOCTL(BLKGETSIZE, w_long)
HANDLE_IOCTL(0x1260, broken_blkgetsize)
HANDLE_IOCTL(BLKFRAGET, w_long)
HANDLE_IOCTL(BLKSECTGET, w_long)
HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_WCACHE, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_ACOUSTIC, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_ADDRESS, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_BUSSTATE, hdio_ioctl_trans)
HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans)
HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans)
HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans)
HANDLE_IOCTL(FDSETDRVPRM32, fd_ioctl_trans)
HANDLE_IOCTL(FDGETDRVPRM32, fd_ioctl_trans)
HANDLE_IOCTL(FDGETDRVSTAT32, fd_ioctl_trans)
HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans)
HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans)
HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans)
HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
#endif
@ -3373,8 +2707,6 @@ HANDLE_IOCTL(PPPIOCSACTIVE32, ppp_sock_fprog_ioctl_trans)
#ifdef CONFIG_BLOCK
HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
HANDLE_IOCTL(CDROMREADAUDIO, cdrom_ioctl_trans)
HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans)
#endif
#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
@ -3415,9 +2747,6 @@ HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
/* block stuff */
#ifdef CONFIG_BLOCK
HANDLE_IOCTL(BLKBSZGET_32, do_blkbszget)
HANDLE_IOCTL(BLKBSZSET_32, do_blkbszset)
HANDLE_IOCTL(BLKGETSIZE64_32, do_blkgetsize64)
/* Raw devices */
HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)


@ -264,15 +264,12 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
* Asynchronous IO callback.
*/
static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
static void dio_bio_end_aio(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long remaining;
unsigned long flags;
if (bio->bi_size)
return 1;
/* cleanup the bio */
dio_bio_complete(dio, bio);
@ -287,8 +284,6 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
aio_complete(dio->iocb, ret, 0);
kfree(dio);
}
return 0;
}
/*
@ -298,21 +293,17 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
* During I/O bi_private points at the dio. After I/O, bi_private is used to
* implement a singly-linked list of completed BIOs, at dio->bio_list.
*/
static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
static void dio_bio_end_io(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long flags;
if (bio->bi_size)
return 1;
spin_lock_irqsave(&dio->bio_lock, flags);
bio->bi_private = dio->bio_list;
dio->bio_list = bio;
if (--dio->refcount == 1 && dio->waiter)
wake_up_process(dio->waiter);
spin_unlock_irqrestore(&dio->bio_lock, flags);
return 0;
}
static int
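
The comment in this hunk describes the trick of reusing bi_private: while the I/O is in flight it points at the dio, and once dio_bio_end_io() has run it becomes the link field of a singly-linked list rooted at dio->bio_list. A simplified sketch of how a waiter could pop one completed bio off that list under bio_lock (illustrative only, not the literal fs/direct-io.c code):

struct bio *bio = NULL;
unsigned long flags;

spin_lock_irqsave(&dio->bio_lock, flags);
if (dio->bio_list) {
        bio = dio->bio_list;                /* most recently completed bio */
        dio->bio_list = bio->bi_private;    /* bi_private links to the next one */
}
spin_unlock_irqrestore(&dio->bio_lock, flags);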


@ -14,6 +14,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>


@ -160,11 +160,9 @@ int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
}
static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
static void end_bio_io_page(struct bio *bio, int error)
{
struct page *page = bio->bi_private;
if (bio->bi_size)
return 1;
if (!error)
SetPageUptodate(page);


@ -2200,16 +2200,13 @@ static int lbmIOWait(struct lbuf * bp, int flag)
*
* executed at INTIODONE level
*/
static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error)
static void lbmIODone(struct bio *bio, int error)
{
struct lbuf *bp = bio->bi_private;
struct lbuf *nextbp, *tail;
struct jfs_log *log;
unsigned long flags;
if (bio->bi_size)
return 1;
/*
* get back jfs buffer bound to the i/o buffer
*/


@ -280,14 +280,10 @@ static void last_read_complete(struct page *page)
unlock_page(page);
}
static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done,
int err)
static void metapage_read_end_io(struct bio *bio, int err)
{
struct page *page = bio->bi_private;
if (bio->bi_size)
return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_ERR "metapage_read_end_io: I/O error\n");
SetPageError(page);
@ -341,16 +337,12 @@ static void last_write_complete(struct page *page)
end_page_writeback(page);
}
static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done,
int err)
static void metapage_write_end_io(struct bio *bio, int err)
{
struct page *page = bio->bi_private;
BUG_ON(!PagePrivate(page));
if (bio->bi_size)
return 1;
if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_ERR "metapage_write_end_io: I/O error\n");
SetPageError(page);


@ -39,14 +39,11 @@
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
static void mpage_end_io_read(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (bio->bi_size)
return 1;
do {
struct page *page = bvec->bv_page;
@ -62,17 +59,13 @@ static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
unlock_page(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
return 0;
}
static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
static void mpage_end_io_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (bio->bi_size)
return 1;
do {
struct page *page = bvec->bv_page;
@ -87,7 +80,6 @@ static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
end_page_writeback(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
return 0;
}
static struct bio *mpage_bio_submit(int rw, struct bio *bio)

View File

@ -217,7 +217,6 @@ static void o2hb_wait_on_io(struct o2hb_region *reg,
}
static int o2hb_bio_end_io(struct bio *bio,
unsigned int bytes_done,
int error)
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
@ -227,9 +226,6 @@ static int o2hb_bio_end_io(struct bio *bio,
wc->wc_error = error;
}
if (bio->bi_size)
return 1;
o2hb_bio_wait_dec(wc, 1);
bio_put(bio);
return 0;


@ -326,14 +326,10 @@ xfs_iomap_valid(
STATIC int
xfs_end_bio(
struct bio *bio,
unsigned int bytes_done,
int error)
{
xfs_ioend_t *ioend = bio->bi_private;
if (bio->bi_size)
return 1;
ASSERT(atomic_read(&bio->bi_cnt) >= 1);
ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;


@ -1106,16 +1106,12 @@ _xfs_buf_ioend(
STATIC int
xfs_buf_bio_end_io(
struct bio *bio,
unsigned int bytes_done,
int error)
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (bio->bi_size)
return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
bp->b_error = EIO;


@ -64,7 +64,7 @@ struct bio_vec {
struct bio_set;
struct bio;
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
/*
@ -226,7 +226,7 @@ struct bio {
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
* drivers should not use the __ version unless they _really_ want to
@ -286,7 +286,7 @@ extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
extern void bio_endio(struct bio *, unsigned int, int);
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);


@ -1,6 +1,8 @@
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
#ifdef CONFIG_BLOCK
#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
@ -32,8 +34,6 @@
)
#endif
#ifdef CONFIG_BLOCK
struct scsi_ioctl_command;
struct request_queue;
@ -471,7 +471,6 @@ struct request_queue
int orderr, ordcolor;
struct request pre_flush_rq, bar_rq, post_flush_rq;
struct request *orig_bar_rq;
unsigned int bi_size;
struct mutex sysfs_lock;
@ -637,10 +636,23 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
}
#endif /* CONFIG_MMU */
#define rq_for_each_bio(_bio, rq) \
struct req_iterator {
int i;
struct bio *bio;
};
/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq) \
if ((rq->bio)) \
for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
#define rq_for_each_segment(bvl, _rq, _iter) \
__rq_for_each_bio(_iter.bio, _rq) \
bio_for_each_segment(bvl, _iter.bio, _iter.i)
#define rq_iter_last(rq, _iter) \
(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
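
For illustration, a driver-side loop written against the new iterator (a hedged sketch: rq is assumed to be a struct request * already held by the driver, and process_segment() is a hypothetical helper) looks like:

struct req_iterator iter;
struct bio_vec *bvec;
char *buf;

rq_for_each_segment(bvec, rq, iter) {
        /* each iteration sees one bio_vec segment of one bio in the request */
        buf = page_address(bvec->bv_page) + bvec->bv_offset;
        process_segment(buf, bvec->bv_len);
}

rq_iter_last() can be checked inside the loop to detect the final segment of the request, e.g. to terminate a scatter/gather list, much as the tape driver conversion earlier in this commit does with rq_for_each_segment.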
@ -662,8 +674,8 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
/*
* Temporary export, until SCSI gets fixed up.
*/
extern int ll_back_merge_fn(struct request_queue *, struct request *,
struct bio *);
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
/*
* A queue has just exited congestion. Note this in the global counter of
@ -810,7 +822,6 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag];
}
extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define MAX_PHYS_SEGMENTS 128


@ -142,10 +142,14 @@ struct blk_user_trace_setup {
u32 pid;
};
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
extern int do_blk_trace_setup(struct request_queue *q,
struct block_device *bdev, struct blk_user_trace_setup *buts);
/**
* blk_add_trace_rq - Add a trace for a request oriented action
@ -286,6 +290,12 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
static inline int do_blk_trace_setup(struct request_queue *q,
struct block_device *bdev,
struct blk_user_trace_setup *buts)
{
return 0;
}
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#endif /* __KERNEL__ */
#endif


@ -1738,6 +1738,11 @@
#define PCI_VENDOR_ID_RADISYS 0x1331
#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415
#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425
#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155
#define PCI_VENDOR_ID_DOMEX 0x134a
#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001


@ -221,7 +221,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
/* linux/mm/page_io.c */
extern int swap_readpage(struct file *, struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
extern void end_swap_bio_read(struct bio *bio, int err);
/* linux/mm/swap_state.c */
extern struct address_space swapper_space;


@ -5,6 +5,7 @@
#define WRITEBACK_H
#include <linux/sched.h>
#include <linux/fs.h>
struct backing_dev_info;


@ -61,6 +61,7 @@
#include <linux/delayacct.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <asm/tlb.h>


@ -140,26 +140,19 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
mempool_free(bvec->bv_page, pool);
}
bio_endio(bio_orig, bio_orig->bi_size, err);
bio_endio(bio_orig, err);
bio_put(bio);
}
static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
static void bounce_end_io_write(struct bio *bio, int err)
{
if (bio->bi_size)
return 1;
bounce_end_io(bio, page_pool, err);
return 0;
}
static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
static void bounce_end_io_write_isa(struct bio *bio, int err)
{
if (bio->bi_size)
return 1;
bounce_end_io(bio, isa_page_pool, err);
return 0;
}
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
@ -172,22 +165,14 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
bounce_end_io(bio, pool, err);
}
static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
static void bounce_end_io_read(struct bio *bio, int err)
{
if (bio->bi_size)
return 1;
__bounce_end_io_read(bio, page_pool, err);
return 0;
}
static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
static void bounce_end_io_read_isa(struct bio *bio, int err)
{
if (bio->bi_size)
return 1;
__bounce_end_io_read(bio, isa_page_pool, err);
return 0;
}
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,


@ -44,14 +44,11 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
return bio;
}
static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err)
static void end_swap_bio_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct page *page = bio->bi_io_vec[0].bv_page;
if (bio->bi_size)
return 1;
if (!uptodate) {
SetPageError(page);
/*
@ -71,17 +68,13 @@ static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err)
}
end_page_writeback(page);
bio_put(bio);
return 0;
}
int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err)
void end_swap_bio_read(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct page *page = bio->bi_io_vec[0].bv_page;
if (bio->bi_size)
return 1;
if (!uptodate) {
SetPageError(page);
ClearPageUptodate(page);
@ -94,7 +87,6 @@ int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err)
}
unlock_page(page);
bio_put(bio);
return 0;
}
/*


@ -15,6 +15,7 @@
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{