Commit ee9ff853 authored by Konrad Rzeszutek Wilk

xen/blkback: Squash vbd.c, interface.c in blkback.c and xenbus.c respectively.

Daniel Stodden suggested eliminating vbd.c and interface.c, inlining the
critical bits where they belong, respectively.

This leaves blkback.c for the data path and xenbus.c for the control path.
Suggested-by: Daniel Stodden <daniel.stodden@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Parent dfc07b13
obj-$(CONFIG_XEN_BLKDEV_BACKEND) := xen-blkback.o
xen-blkback-y := blkback.o xenbus.o interface.o vbd.o
xen-blkback-y := blkback.o xenbus.o
@@ -166,6 +166,141 @@ static void free_req(struct pending_req *req)
        wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                         get_capacity((_v)->bdev->bd_disk))

unsigned long long vbd_size(struct vbd *vbd)
{
        return vbd_sz(vbd);
}

unsigned int vbd_info(struct vbd *vbd)
{
        return vbd->type | (vbd->readonly ? VDISK_READONLY : 0);
}

unsigned long vbd_secsize(struct vbd *vbd)
{
        return bdev_logical_block_size(vbd->bdev);
}
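
/*
 * Attach the VBD to its physical backing device: open the device node,
 * record its size, and flag it as CD-ROM/removable or read-only as
 * appropriate.
 */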
int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle, unsigned major,
               unsigned minor, int readonly, int cdrom)
{
        struct vbd *vbd;
        struct block_device *bdev;

        vbd = &blkif->vbd;
        vbd->handle = handle;
        vbd->readonly = readonly;
        vbd->type = 0;
        vbd->pdevice = MKDEV(major, minor);

        bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
                                 FMODE_READ : FMODE_WRITE, NULL);
        if (IS_ERR(bdev)) {
                DPRINTK("vbd_create: device %08x could not be opened.\n",
                        vbd->pdevice);
                return -ENOENT;
        }

        vbd->bdev = bdev;
        vbd->size = vbd_size(vbd);

        if (vbd->bdev->bd_disk == NULL) {
                DPRINTK("vbd_create: device %08x doesn't exist.\n",
                        vbd->pdevice);
                vbd_free(vbd);
                return -ENOENT;
        }

        if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
                vbd->type |= VDISK_CDROM;
        if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
                vbd->type |= VDISK_REMOVABLE;

        DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
                handle, blkif->domid);
        return 0;
}

void vbd_free(struct vbd *vbd)
{
        if (vbd->bdev)
                blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
        vbd->bdev = NULL;
}
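
/*
 * Translate a request on the virtual device into one on the underlying
 * physical device, rejecting writes to read-only VBDs and accesses past
 * the end of the device.
 */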
int vbd_translate(struct phys_req *req, struct blkif_st *blkif, int operation)
{
        struct vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
                goto out;

        req->dev = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

out:
        return rc;
}
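
/*
 * The backing device has been resized: record the new size and advertise
 * it to the frontend through xenstore.
 */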
void vbd_resize(struct blkif_st *blkif)
{
        struct vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = blkback_xenbus(blkif->be);
        unsigned long long new_size = vbd_size(vbd);

        printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
               blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        printk(KERN_INFO "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                printk(KERN_WARNING "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            vbd_size(vbd));
        if (err) {
                printk(KERN_WARNING "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                printk(KERN_WARNING "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                printk(KERN_WARNING "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
......
/******************************************************************************
* Block-device interface management.
*
* Copyright (c) 2004, Keir Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "common.h"
#include <xen/events.h>
#include <xen/grant_table.h>
#include <linux/kthread.h>

static struct kmem_cache *blkif_cachep;

struct blkif_st *blkif_alloc(domid_t domid)
{
        struct blkif_st *blkif;

        blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
        if (!blkif)
                return ERR_PTR(-ENOMEM);

        memset(blkif, 0, sizeof(*blkif));
        blkif->domid = domid;
        spin_lock_init(&blkif->blk_ring_lock);
        atomic_set(&blkif->refcnt, 1);
        init_waitqueue_head(&blkif->wq);
        blkif->st_print = jiffies;
        init_waitqueue_head(&blkif->waiting_to_free);

        return blkif;
}

static int map_frontend_page(struct blkif_st *blkif, unsigned long shared_page)
{
        struct gnttab_map_grant_ref op;

        gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
                          GNTMAP_host_map, shared_page, blkif->domid);

        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
                BUG();

        if (op.status) {
                DPRINTK(" Grant table operation failure !\n");
                return op.status;
        }

        blkif->shmem_ref = shared_page;
        blkif->shmem_handle = op.handle;

        return 0;
}

static void unmap_frontend_page(struct blkif_st *blkif)
{
        struct gnttab_unmap_grant_ref op;

        gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
                            GNTMAP_host_map, blkif->shmem_handle);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();
}

int blkif_map(struct blkif_st *blkif, unsigned long shared_page,
              unsigned int evtchn)
{
        int err;

        /* Already connected through? */
        if (blkif->irq)
                return 0;

        blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
        if (!blkif->blk_ring_area)
                return -ENOMEM;

        err = map_frontend_page(blkif, shared_page);
        if (err) {
                free_vm_area(blkif->blk_ring_area);
                return err;
        }

        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
        {
                struct blkif_sring *sring;
                sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
                struct blkif_x86_32_sring *sring_x86_32;
                sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
                struct blkif_x86_64_sring *sring_x86_64;
                sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
                break;
        }
        default:
                BUG();
        }

        err = bind_interdomain_evtchn_to_irqhandler(
                blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
        if (err < 0) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
                blkif->blk_rings.common.sring = NULL;
                return err;
        }
        blkif->irq = err;

        return 0;
}

void blkif_disconnect(struct blkif_st *blkif)
{
        if (blkif->xenblkd) {
                kthread_stop(blkif->xenblkd);
                blkif->xenblkd = NULL;
        }

        atomic_dec(&blkif->refcnt);
        wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
        atomic_inc(&blkif->refcnt);

        if (blkif->irq) {
                unbind_from_irqhandler(blkif->irq, blkif);
                blkif->irq = 0;
        }

        if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
                blkif->blk_rings.common.sring = NULL;
        }
}

void blkif_free(struct blkif_st *blkif)
{
        if (!atomic_dec_and_test(&blkif->refcnt))
                BUG();
        kmem_cache_free(blkif_cachep, blkif);
}

int __init blkif_interface_init(void)
{
        blkif_cachep = kmem_cache_create("blkif_cache", sizeof(struct blkif_st),
                                         0, 0, NULL);
        if (!blkif_cachep)
                return -ENOMEM;

        return 0;
}
/******************************************************************************
* Routines for managing virtual block devices (VBDs).
*
* Copyright (c) 2003-2005, Keir Fraser & Steve Hand
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "common.h"

#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                         get_capacity((_v)->bdev->bd_disk))

unsigned long long vbd_size(struct vbd *vbd)
{
        return vbd_sz(vbd);
}

unsigned int vbd_info(struct vbd *vbd)
{
        return vbd->type | (vbd->readonly ? VDISK_READONLY : 0);
}

unsigned long vbd_secsize(struct vbd *vbd)
{
        return bdev_logical_block_size(vbd->bdev);
}

int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle, unsigned major,
               unsigned minor, int readonly, int cdrom)
{
        struct vbd *vbd;
        struct block_device *bdev;

        vbd = &blkif->vbd;
        vbd->handle = handle;
        vbd->readonly = readonly;
        vbd->type = 0;
        vbd->pdevice = MKDEV(major, minor);

        bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
                                 FMODE_READ : FMODE_WRITE, NULL);
        if (IS_ERR(bdev)) {
                DPRINTK("vbd_create: device %08x could not be opened.\n",
                        vbd->pdevice);
                return -ENOENT;
        }

        vbd->bdev = bdev;
        vbd->size = vbd_size(vbd);

        if (vbd->bdev->bd_disk == NULL) {
                DPRINTK("vbd_create: device %08x doesn't exist.\n",
                        vbd->pdevice);
                vbd_free(vbd);
                return -ENOENT;
        }

        if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
                vbd->type |= VDISK_CDROM;
        if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
                vbd->type |= VDISK_REMOVABLE;

        DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
                handle, blkif->domid);
        return 0;
}

void vbd_free(struct vbd *vbd)
{
        if (vbd->bdev)
                blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
        vbd->bdev = NULL;
}

int vbd_translate(struct phys_req *req, struct blkif_st *blkif, int operation)
{
        struct vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
                goto out;

        req->dev = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

out:
        return rc;
}

void vbd_resize(struct blkif_st *blkif)
{
        struct vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = blkback_xenbus(blkif->be);
        unsigned long long new_size = vbd_size(vbd);

        printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
               blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        printk(KERN_INFO "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                printk(KERN_WARNING "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            vbd_size(vbd));
        if (err) {
                printk(KERN_WARNING "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                printk(KERN_WARNING "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                printk(KERN_WARNING "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}
@@ -20,6 +20,8 @@
#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
#undef DPRINTK
@@ -36,6 +38,7 @@ struct backend_info {
        char *mode;
};

static struct kmem_cache *blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
@@ -106,6 +109,154 @@ static void update_blkif_status(struct blkif_st *blkif)
        }
}
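
/* Allocate and initialise the per-frontend block interface structure. */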
struct blkif_st *blkif_alloc(domid_t domid)
{
        struct blkif_st *blkif;

        blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
        if (!blkif)
                return ERR_PTR(-ENOMEM);

        memset(blkif, 0, sizeof(*blkif));
        blkif->domid = domid;
        spin_lock_init(&blkif->blk_ring_lock);
        atomic_set(&blkif->refcnt, 1);
        init_waitqueue_head(&blkif->wq);
        blkif->st_print = jiffies;
        init_waitqueue_head(&blkif->waiting_to_free);

        return blkif;
}
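
/*
 * Map the page carrying the frontend's shared I/O ring into this domain
 * via the grant table.
 */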
static int map_frontend_page(struct blkif_st *blkif, unsigned long shared_page)
{
        struct gnttab_map_grant_ref op;

        gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
                          GNTMAP_host_map, shared_page, blkif->domid);

        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
                BUG();

        if (op.status) {
                DPRINTK(" Grant table operation failure !\n");
                return op.status;
        }

        blkif->shmem_ref = shared_page;
        blkif->shmem_handle = op.handle;

        return 0;
}

static void unmap_frontend_page(struct blkif_st *blkif)
{
        struct gnttab_unmap_grant_ref op;

        gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
                            GNTMAP_host_map, blkif->shmem_handle);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();
}
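
/*
 * Connect to the frontend: map the shared ring page, initialise the ring
 * for the negotiated protocol (native, x86_32 or x86_64) and bind the
 * interdomain event channel to the backend interrupt handler.
 */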
int blkif_map(struct blkif_st *blkif, unsigned long shared_page,
              unsigned int evtchn)
{
        int err;

        /* Already connected through? */
        if (blkif->irq)
                return 0;

        blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
        if (!blkif->blk_ring_area)
                return -ENOMEM;

        err = map_frontend_page(blkif, shared_page);
        if (err) {
                free_vm_area(blkif->blk_ring_area);
                return err;
        }

        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
        {
                struct blkif_sring *sring;
                sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
                struct blkif_x86_32_sring *sring_x86_32;
                sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
                struct blkif_x86_64_sring *sring_x86_64;
                sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
                break;
        }
        default:
                BUG();
        }

        err = bind_interdomain_evtchn_to_irqhandler(
                blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
        if (err < 0) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
                blkif->blk_rings.common.sring = NULL;
                return err;
        }
        blkif->irq = err;

        return 0;
}
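
/*
 * Tear down the connection: stop the xenblkd thread, wait for outstanding
 * references to drain, then unbind the IRQ and unmap the shared ring.
 */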
void blkif_disconnect(struct blkif_st *blkif)
{
        if (blkif->xenblkd) {
                kthread_stop(blkif->xenblkd);
                blkif->xenblkd = NULL;
        }

        atomic_dec(&blkif->refcnt);
        wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
        atomic_inc(&blkif->refcnt);

        if (blkif->irq) {
                unbind_from_irqhandler(blkif->irq, blkif);
                blkif->irq = 0;
        }

        if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
                blkif->blk_rings.common.sring = NULL;
        }
}

void blkif_free(struct blkif_st *blkif)
{
        if (!atomic_dec_and_test(&blkif->refcnt))
                BUG();
        kmem_cache_free(blkif_cachep, blkif);
}

int __init blkif_interface_init(void)
{
        blkif_cachep = kmem_cache_create("blkif_cache", sizeof(struct blkif_st),
                                         0, 0, NULL);
        if (!blkif_cachep)
                return -ENOMEM;

        return 0;
}

/*
 * sysfs interface for VBD I/O requests
......