Porting Embedded Linux to ARM: Device Drivers
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
struct mtd_info *mtd = mtdblk->mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
return 0;
DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
"at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
ret = erase_write (mtd, mtdblk->cache_offset,
mtdblk->cache_size, mtdblk->cache_data);
if (ret)
return ret;
mtdblk->cache_state = STATE_EMPTY;
return 0;
}
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
…
}
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
…
}
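The bodies of do_cached_write() and do_cached_read() are omitted above. As a rough illustration only (not the driver's actual code), the read path serves a request straight from the per-device erase-block cache when the whole range is already buffered, and otherwise falls back to the raw MTD read; the field names follow the mtdblk_dev structure used elsewhere in this listing, and the function name below is invented for the sketch.
/* Illustrative sketch only -- not the original do_cached_read() body. */
static int cached_read_sketch(struct mtdblk_dev *mtdblk, unsigned long pos,
    int len, char *buf)
{
    struct mtd_info *mtd = mtdblk->mtd;
    size_t retlen;

    if (mtdblk->cache_state != STATE_EMPTY &&
        pos >= mtdblk->cache_offset &&
        pos + len <= mtdblk->cache_offset + mtdblk->cache_size) {
        /* Cache hit: copy straight out of the buffered erase block */
        memcpy(buf, mtdblk->cache_data + (pos - mtdblk->cache_offset), len);
        return 0;
    }

    /* Cache miss: fall back to the underlying MTD read method */
    return mtd->read(mtd, pos, len, &retlen, (u_char *)buf);
}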
static int mtdblock_open(struct inode *inode, struct file *file)
{
…
}
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
int dev;
struct mtdblk_dev *mtdblk;
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
if (inode == NULL)
release_return(-ENODEV);
dev = minor(inode->i_rdev);
mtdblk = mtdblks[dev];
down(&mtdblk->cache_sem);
write_cached_data(mtdblk);
up(&mtdblk->cache_sem);
spin_lock(&mtdblks_lock);
if (!--mtdblk->count) {
/* It was the last usage. Free the device */
mtdblks[dev] = NULL;
spin_unlock(&mtdblks_lock);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
put_mtd_device(mtdblk->mtd);
vfree(mtdblk->cache_data);
kfree(mtdblk);
} else {
spin_unlock(&mtdblks_lock);
}
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
BLK_DEC_USE_COUNT;
release_return(0);
}
/*
* This is a special request_fn because it is executed in a process context
* to be able to sleep independently of the caller. The
* io_request_lock (for <2.5) or queue_lock (for >=2.5) is held upon entry
* and exit. The head of our request queue is considered active so there is
* no need to dequeue requests before we are done.
*/
static void handle_mtdblock_request(void)
{
struct request *req;
struct mtdblk_dev *mtdblk;
unsigned int res;
for (;;) {
INIT_REQUEST;
req = CURRENT;
spin_unlock_irq(QUEUE_LOCK(QUEUE));
mtdblk = mtdblks[minor(req->rq_dev)];
res = 0;
if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
panic("%s : minor out of bound", __FUNCTION__);
if (!IS_REQ_CMD(req))
goto end_req;
if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
goto end_req;
// Handle the request
switch (rq_data_dir(req))
{
int err;
case READ:
down(&mtdblk->cache_sem);
err = do_cached_read (mtdblk, req->sector << 9,
req->current_nr_sectors << 9,
req->buffer);
up(&mtdblk->cache_sem);
if (!err)
res = 1;
break;
case WRITE:
// Read only device
if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
break;
// Do the write
down(&mtdblk->cache_sem);
err = do_cached_write (mtdblk, req->sector << 9, req->current_nr_sectors << 9, req->buffer);
up(&mtdblk->cache_sem);
if (!err)
res = 1;
break;
}
end_req:
spin_lock_irq(QUEUE_LOCK(QUEUE));
end_request(res);
}
}
static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
int mtdblock_thread(void *dummy)
{
…
}
#define RQFUNC_ARG request_queue_t *q
static void mtdblock_request(RQFUNC_ARG)
{
/* Don't do anything, except wake the thread if necessary */
wake_up(&thr_wq);
}
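The mtdblock_thread() body is also omitted above. The comment before handle_mtdblock_request() explains the design: the block layer's request function only wakes a kernel thread, and the thread then processes requests in process context so it is free to sleep. A minimal sketch of such a worker loop, assuming the 2.4-era macros (QUEUE_LOCK, QUEUE_EMPTY, QUEUE_PLUGGED) and the leaving/thread_sem/thr_wq variables declared above, might look like this; it is not the original function body.
/* Illustrative sketch only -- not the original mtdblock_thread() body. */
int mtdblock_thread_sketch(void *dummy)
{
    DECLARE_WAITQUEUE(wait, current);

    daemonize(); /* detach from the parent process (2.4-era API) */

    while (!leaving) {
        add_wait_queue(&thr_wq, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
        spin_lock_irq(QUEUE_LOCK(QUEUE));
        if (QUEUE_EMPTY || QUEUE_PLUGGED) {
            /* Nothing to do: drop the lock and sleep until woken */
            spin_unlock_irq(QUEUE_LOCK(QUEUE));
            schedule();
            remove_wait_queue(&thr_wq, &wait);
        } else {
            /* Requests pending: handle them in process context */
            remove_wait_queue(&thr_wq, &wait);
            set_current_state(TASK_RUNNING);
            handle_mtdblock_request();
            spin_unlock_irq(QUEUE_LOCK(QUEUE));
        }
    }

    up(&thread_sem); /* tell the module-exit path the thread is gone */
    return 0;
}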
static int mtdblock_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg)
{
struct mtdblk_dev *mtdblk;
mtdblk = mtdblks[minor(inode->i_rdev)];
switch (cmd) {
case BLKGETSIZE: /* Return device size */
return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);
case BLKFLSBUF:
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
down(&mtdblk->cache_sem);
write_cached_data(mtdblk);
up(&mtdblk->cache_sem);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
return 0;
default:
return -EINVAL;
}
}
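For reference, the two ioctls handled above can be exercised from user space. The following is a minimal sketch, assuming the device node is /dev/mtdblock0 (a typical but site-specific path); BLKGETSIZE reports the device size in 512-byte sectors, and BLKFLSBUF requires CAP_SYS_ADMIN as enforced in the handler above.
/* Illustrative user-space sketch: query an mtdblock device's size and flush it. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>   /* BLKGETSIZE, BLKFLSBUF */

int main(void)
{
    unsigned long sectors;
    int fd = open("/dev/mtdblock0", O_RDONLY);  /* device path is an assumption */

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, BLKGETSIZE, &sectors) == 0)
        printf("size: %lu sectors (%lu bytes)\n", sectors, sectors * 512);
    if (ioctl(fd, BLKFLSBUF) < 0)   /* needs CAP_SYS_ADMIN */
        perror("BLKFLSBUF");

    close(fd);
    return 0;
}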
linux相關(guān)文章:linux教程
評(píng)論