Code:
static void stackbd_io_fn(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
int size = bio->bi_iter.bi_size;
int nsect = size >> KERNEL_SECTOR_SHIFT;
DECLARE_BIO_VEC bvec;
struct bvec_iter iter;
u8 *buffer = kmalloc(size, GFP_ATOMIC);
u8 *ptr = buffer;
if (bio_data_dir(bio) == READ)
{
ald_buffer_read(sector, nsect, ptr);
bio_for_each_segment(bvec, bio, iter)
{
u8 *dst = page_address(ACCESS_BIO_VEC(bvec).bv_page) + ACCESS_BIO_VEC(bvec).bv_offset;
int len = ACCESS_BIO_VEC(bvec).bv_len;
memcpy(dst, ptr, len);
ptr += len;
}
}
else
{
bio_for_each_segment(bvec, bio, iter)
{
u8 *src = page_address(ACCESS_BIO_VEC(bvec).bv_page) + ACCESS_BIO_VEC(bvec).bv_offset;
int len = ACCESS_BIO_VEC(bvec).bv_len;
memcpy(ptr, src, len);
ptr += len;
}
ald_buffer_write(sector, nsect, buffer);
}
kfree(buffer);
my_bio_complete(bio, 0);
}
static int stackbd_threadfn(void *data)
{
struct bio *bio;
set_user_nice(current, -20);
while (!kthread_should_stop())
{
/* wake_up() is after adding bio to list. No need for condition */
wait_event_interruptible(req_event, kthread_should_stop() ||
!bio_list_empty(&stackbd.bio_list));
spin_lock_irq(&stackbd.lock);
if (bio_list_empty(&stackbd.bio_list))
{
spin_unlock_irq(&stackbd.lock);
continue;
}
bio = bio_list_pop(&stackbd.bio_list);
spin_unlock_irq(&stackbd.lock);
stackbd_io_fn(bio);
}
return 0;
}
#if USE_BLKMQ
//#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 3)
/*
 * blk-mq .queue_rq handler: queue the request's bio for the worker
 * thread.  The prototype changed across kernel releases, hence the
 * version #ifs around the signature.
 *
 * Always returns BLK_STS_OK; failures are reported by completing the
 * bio itself with -EIO.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
static blk_status_t hidden_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data* bd)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
static blk_status_t hidden_queue_rq(struct blk_mq_hw_ctx *hctx, struct request* rq, bool last)
#else
static blk_status_t hidden_queue_rq(struct blk_mq_hw_ctx *hctx, struct request* rq)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
    struct request *rq = bd->rq;
#endif
    struct bio *bio = rq->bio;

    pb_alloc(bio, rq);

    spin_lock_irq(&stackbd.lock);
    if (!stackbd.bdev_raw)
    {
        printk("stackbd: Request before bdev_raw is ready, aborting\n");
        goto abort;
    }
    if (!stackbd.is_active)
    {
        printk("stackbd: Device not active yet, aborting\n");
        goto abort;
    }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
    blk_mq_start_request(rq);
#endif
    bio_list_add(&stackbd.bio_list, bio);
    wake_up(&req_event);
    spin_unlock_irq(&stackbd.lock);
exit:
    return BLK_STS_OK; //always return ok
abort:
    /*
     * BUG FIX: the abort path previously jumped here with stackbd.lock
     * still held (and IRQs disabled), deadlocking the next request.
     * Release the lock before completing the bio, matching the
     * non-blk-mq path in stackbd_make_request().
     */
    spin_unlock_irq(&stackbd.lock);
    my_bio_complete(bio, -EIO);
    goto exit;
}
/*
 * blk-mq operations table.  Only .queue_rq is required on modern
 * kernels; .map_queue was a mandatory field on older kernels (it was
 * removed from blk_mq_ops later), hence the version guard.
 */
static struct blk_mq_ops _mq_ops =
{
.queue_rq = hidden_queue_rq,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 2, 0)
.map_queue = blk_mq_map_queue
#endif
};
#else // #if USE_BLKMQ
/*
 * Handle an I/O request (non-blk-mq path).
 *
 * Queues the bio on stackbd.bio_list for the worker thread and wakes
 * it.  If the underlying device is not ready/active, the bio is
 * failed with bio_io_error().  Always returns BLK_QC_T_NONE.
 */
static blk_qc_t stackbd_make_request(struct request_queue *q, struct bio *bio)
{
    /*
     * sector_t may be 64-bit even on 32-bit kernels, so it must be
     * printed as %llu via an explicit cast (the old %lu was a
     * format-specifier mismatch, i.e. undefined behavior there).
     */
    printk("stackbd: make request %-5s block %-12llu #pages %-4hu total-size "
            "%-10u\n", bio_data_dir(bio) == WRITE ? "write" : "read",
            (unsigned long long)bio->bi_iter.bi_sector,
            bio->bi_vcnt, bio->bi_iter.bi_size);

    spin_lock_irq(&stackbd.lock);
    if (!stackbd.bdev_raw)
    {
        printk("stackbd: Request before bdev_raw is ready, aborting\n");
        goto abort;
    }
    if (!stackbd.is_active)
    {
        printk("stackbd: Device not active yet, aborting\n");
        goto abort;
    }
    bio_list_add(&stackbd.bio_list, bio);
    wake_up(&req_event);
    spin_unlock_irq(&stackbd.lock);
    goto exit;
abort:
    spin_unlock_irq(&stackbd.lock);
    printk("<%p> Abort request\n\n", bio);
    bio_io_error(bio);
exit:
    return BLK_QC_T_NONE;
}
#endif // #if USE_BLKMQ
/*
 * Look up and open the underlying (raw) block device by path.
 *
 * Returns the opened block_device on success, or NULL on any failure
 * (lookup, bdget, or blkdev_get).  The caller owns the reference and
 * must release it with blkdev_put()/bdput().
 */
static struct block_device *stackbd_bdev_open(char dev_path[])
{
    struct block_device *bdev_raw = lookup_bdev(dev_path);

    if (IS_ERR(bdev_raw))
    {
        /*
         * BUG FIX: the success message used to be printed before this
         * check, claiming "Opened" even when the lookup failed.
         */
        printk("stackbd: error opening raw device <%lu>\n", PTR_ERR(bdev_raw));
        return NULL;
    }
    printk("Opened %s\n", dev_path);

    /* Take our own reference on the device before opening it. */
    if (!bdget(bdev_raw->bd_dev))
    {
        printk("stackbd: error bdget()\n");
        return NULL;
    }

    if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE, &stackbd))
    {
        printk("stackbd: error blkdev_get()\n");
        bdput(bdev_raw);
        return NULL;
    }

    return bdev_raw;
}
/*
 * Attach to the raw device at @dev_path and start the worker thread.
 *
 * Mirrors the raw device's capacity and max-sectors limit onto our
 * gendisk/queue, then spawns stackbd_threadfn.  Returns 0 on success,
 * -EFAULT on any failure (kept for userspace compatibility).
 */
static int stackbd_start(char dev_path[])
{
    unsigned max_sectors;

    if (!(stackbd.bdev_raw = stackbd_bdev_open(dev_path)))
        return -EFAULT;

    /* Set up our internal device to mirror the raw one. */
    stackbd.capacity = get_capacity(stackbd.bdev_raw->bd_disk);
    /* sector_t may be 64-bit on 32-bit kernels: cast + %llu, not %lu. */
    printk("stackbd: Device real capacity: %llu\n",
            (unsigned long long)stackbd.capacity);
    set_capacity(stackbd.gd, stackbd.capacity);

    max_sectors = queue_max_hw_sectors(bdev_get_queue(stackbd.bdev_raw));
    blk_queue_max_hw_sectors(stackbd.queue, max_sectors);
    printk("stackbd: Max sectors: %u\n", max_sectors);

    stackbd.thread = kthread_create(stackbd_threadfn, NULL,
            stackbd.gd->disk_name);
    if (IS_ERR(stackbd.thread))
    {
        printk("stackbd: error kthread_create <%lu>\n",
                PTR_ERR(stackbd.thread));
        goto error_after_bdev;
    }

    printk("stackbd: done initializing successfully\n");
    /* Mark active before waking the thread so requests can flow. */
    stackbd.is_active = 1;
    wake_up_process(stackbd.thread);

    return 0;

error_after_bdev:
    blkdev_put(stackbd.bdev_raw, STACKBD_BDEV_MODE);
    bdput(stackbd.bdev_raw);
    return -EFAULT;
}
/*
 * ioctl handler.  STACKBD_DO_IT copies a device path from user space
 * and attaches to it via stackbd_start().  Any other command returns
 * -ENOTTY per ioctl convention.
 */
static int stackbd_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
    char dev_path[80];
    void __user *argp = (void __user *)arg;

    switch (cmd)
    {
    case STACKBD_DO_IT:
        printk("\n*** DO IT!!!!!!! ***\n\n");

        if (copy_from_user(dev_path, argp, sizeof(dev_path)))
            return -EFAULT;
        /*
         * SECURITY FIX: user space is not guaranteed to send a
         * NUL-terminated string; force termination so lookup_bdev()
         * and printk() downstream cannot read past the buffer.
         */
        dev_path[sizeof(dev_path) - 1] = '\0';

        return stackbd_start(dev_path);
    default:
        return -ENOTTY;
    }
}