blkfront: Clean up vbd release

* The current blkfront_closing is effectively an xlvbd_release_gendisk.
   Renamed in preparation for later patches (the name will be needed again).

 * Removed the misleading comment -- it only applied to the backend
   switch handler, and the queue is already flushed at that point anyway.

 * Break out the xenbus call, callers know better when to switch
   frontend state.

Signed-off-by: Daniel Stodden <daniel.stodden@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
This commit is contained in:
Daniel Stodden 2010-08-07 18:33:17 +02:00 committed by Jens Axboe
parent 9897cb5323
commit a66b5aebb7

View File

@ -534,6 +534,39 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
return err;
}
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
unsigned int minor, nr_minors;
unsigned long flags;
if (info->rq == NULL)
return;
spin_lock_irqsave(&blkif_io_lock, flags);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irqrestore(&blkif_io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_scheduled_work();
del_gendisk(info->gd);
minor = info->gd->first_minor;
nr_minors = info->gd->minors;
xlbd_release_minors(minor, nr_minors);
blk_cleanup_queue(info->rq);
info->rq = NULL;
put_disk(info->gd);
info->gd = NULL;
}
static void kick_pending_request_queues(struct blkfront_info *info)
{
if (!RING_FULL(&info->ring)) {
@ -994,49 +1027,6 @@ static void blkfront_connect(struct blkfront_info *info)
info->is_ready = 1;
}
/**
* Handle the change of state of the backend to Closing. We must delete our
* device-layer structures now, to ensure that writes are flushed through to
* the backend. Once is this done, we can switch to Closed in
* acknowledgement.
*/
static void blkfront_closing(struct blkfront_info *info)
{
unsigned int minor, nr_minors;
unsigned long flags;
if (info->rq == NULL)
goto out;
spin_lock_irqsave(&blkif_io_lock, flags);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irqrestore(&blkif_io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_scheduled_work();
minor = info->gd->first_minor;
nr_minors = info->gd->minors;
del_gendisk(info->gd);
xlbd_release_minors(minor, nr_minors);
blk_cleanup_queue(info->rq);
info->rq = NULL;
put_disk(info->gd);
info->gd = NULL;
out:
if (info->xbdev)
xenbus_frontend_closed(info->xbdev);
}
/**
* Callback received when the backend's state changes.
*/
@ -1073,8 +1063,11 @@ static void blkback_changed(struct xenbus_device *dev,
if (info->users > 0)
xenbus_dev_error(dev, -EBUSY,
"Device in use; refusing to close");
else
blkfront_closing(info);
else {
xlvbd_release_gendisk(info);
xenbus_frontend_closed(info->xbdev);
}
mutex_unlock(&bd->bd_mutex);
bdput(bd);
break;
@ -1130,11 +1123,13 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
struct xenbus_device *dev = info->xbdev;
if (!dev) {
blkfront_closing(info);
xlvbd_release_gendisk(info);
kfree(info);
} else if (xenbus_read_driver_state(dev->otherend)
== XenbusStateClosing && info->is_ready)
blkfront_closing(info);
== XenbusStateClosing && info->is_ready) {
xlvbd_release_gendisk(info);
xenbus_frontend_closed(dev);
}
}
unlock_kernel();
return 0;