码迷,mamicode.com
首页 > 其他好文 > 详细

nvme 驱动详解 之1

时间:2015-08-17 21:47:01      阅读:787      评论:0      收藏:0      [点我收藏+]

标签:linux   driver   

按照老的套路,在分析一个driver时,我们首先看这个driver相关的kconfig及Makefile文件,察看相关的源代码文件.

在开始阅读一个driver,通常都是从module_init or syscall_init函数看起。

 

下面让我们开始nvme的旅程吧。

首先打开driver/block下的kconfig文件,其中定义了BLK_DEV_NVME config,如下。

config BLK_DEV_NVME
	tristate "NVM Express block device"
	depends on PCI
	---help---
	  The NVM Express driver is for solid state drives directly
	  connected to the PCI or PCI Express bus.  If you know you
	  don't have one of these, it is safe to answer N.

	  To compile this driver as a module, choose M here: the
	  module will be called nvme.

通过console,输入make menuconfig,搜索BLK_DEV_NVME得到如下依赖关系。

Symbol: BLK_DEV_NVME [=m]

  | Type  : tristate

  | Prompt: NVM Express block device

  |   Location:

  |     -> Device Drivers

  | (1)   -> Block devices (BLK_DEV [=y])

  |   Defined at drivers/block/Kconfig:313

  |   Depends on: BLK_DEV [=y] && PCI [=y]

可以看到nvme依赖于BLK_DEV和PCI。

打开driver/block/Makefile,搜索NVME,可以看到:

# Build nvme.ko (or built-in) from the core and SCSI-translation objects.
obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
nvme-y				:= nvme-core.o nvme-scsi.o

 

关于和BLK相关的文件,打开block/Makefile:

# Objects that make up the generic block layer (block/Makefile).
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
			blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
			partitions/

哇塞,是不是很多?不要担心,NVME也只是用了BLOCK层的一些函数而已,不用把所用与BLOCK相关的文件都看了,除非你有精力去研究。

 

好了,到目前为止,我们知道了要看哪些文件了,nvme-core.c和nvme-scsi.c是必须的,剩下的就是当我们的driver调用到block层哪些函数再去研究。

 

打开nvme-core,查看入口函数,module_init(nvme_init);

/*
 * Module entry point: set up the kthread wait queue and the driver
 * workqueue, register the block-device major, then register the PCI
 * driver (which triggers nvme_probe() for each matching device).
 */
static int __init nvme_init(void)
{
	int result;

	init_waitqueue_head(&nvme_kthread_wait);	/* wait queue for the nvme kthread */

	nvme_workq = create_singlethread_workqueue("nvme");	/* driver work queue */
	if (!nvme_workq)
		return -ENOMEM;

	result = register_blkdev(nvme_major, "nvme");	/* register block device major */
	if (result < 0)
		goto kill_workq;
	else if (result > 0)
		nvme_major = result;	/* dynamically allocated major */

	result = pci_register_driver(&nvme_driver);	/* register the PCI driver */
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_workq:
	destroy_workqueue(nvme_workq);
	return result;
}

注册pci driver后,会调用nvme_driver中的probe函数。发现开始总是美好的,函数是如此的简洁,不要高兴的太早,痛苦的经历正在逼近。

staticint nvme_probe(structpci_dev *pdev, const struct pci_device_id *id)

{

         int node, result = -ENOMEM;

         struct nvme_dev *dev;

 

         node = dev_to_node(&pdev->dev);//获取node节点,与NUMA系统有关。

         if (node == NUMA_NO_NODE)

                   set_dev_node(&pdev->dev,0);

 

         dev = kzalloc_node(sizeof(*dev),GFP_KERNEL, node);

         if (!dev)

                   return -ENOMEM;

         dev->entry =kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),//分配msix-entry

                                                                 GFP_KERNEL,node);

         if (!dev->entry)

                   goto free;

         dev->queues =kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),//分配queues 资源,

                                                                 GFP_KERNEL,node);//这里之所以多1,是因为有admin-queues

         if (!dev->queues)

                   goto free;

 

         INIT_LIST_HEAD(&dev->namespaces);//初始化namespaces链表。

         dev->reset_workfn =nvme_reset_failed_dev;

         INIT_WORK(&dev->reset_work,nvme_reset_workfn);

         dev->pci_dev = pci_dev_get(pdev);

         pci_set_drvdata(pdev, dev);

         result = nvme_set_instance(dev);//设置pci设备的句柄instance,代表该设备。

         if (result)

                   goto put_pci;

 

         result = nvme_setup_prp_pools(dev);//设置dma需要的prp内存池。

         if (result)

                   goto release;

 

         kref_init(&dev->kref);

         result = nvme_dev_start(dev);//创建admin queue io queue request irq

         if (result)

                   goto release_pools;

 

         if (dev->online_queues > 1)

                   result = nvme_dev_add(dev);//初始化mq,并增加一个实际可用的nvme dev,并且admin_queue可以发送cmd

         if (result)

                   goto shutdown;

 

         scnprintf(dev->name,sizeof(dev->name), "nvme%d", dev->instance);

         dev->miscdev.minor =MISC_DYNAMIC_MINOR;

         dev->miscdev.parent =&pdev->dev;

         dev->miscdev.name = dev->name;

         dev->miscdev.fops =&nvme_dev_fops;

         result =misc_register(&dev->miscdev);//注册一个misc设备

         if (result)

                   goto remove;

 

         nvme_set_irq_hints(dev);

 

         dev->initialized = 1;

         return 0;

 

 remove:

         nvme_dev_remove(dev);

         nvme_dev_remove_admin(dev);

         nvme_free_namespaces(dev);

 shutdown:

         nvme_dev_shutdown(dev);

 release_pools:

         nvme_free_queues(dev, 0);

         nvme_release_prp_pools(dev);

 release:

         nvme_release_instance(dev);

 put_pci:

         pci_dev_put(dev->pci_dev);

 free:

         kfree(dev->queues);

         kfree(dev->entry);

         kfree(dev);

         return result;

}

上面每一个主要功能的函数都简单地注释了一下,描述了做的哪些工作,下面具体看看这些函数是怎么实现的。

staticint nvme_set_instance(structnvme_dev *dev)

{

         int instance, error;

 

         do {

                   if(!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))

                            return -ENODEV;

 

                   spin_lock(&dev_list_lock);

                   error =ida_get_new(&nvme_instance_ida, &instance);

                   spin_unlock(&dev_list_lock);

         } while (error == -EAGAIN);

 

         if (error)

                   return -ENODEV;

 

         dev->instance = instance;//该函数获得设备的instance,相当于该设备的id,代表着该设备。

         return 0;

}

 

nvme_setup_prp_pools用来创建DMA时所用的内存池,prp_page_pool中保存的是内核虚拟地址。

staticint nvme_setup_prp_pools(structnvme_dev *dev)

{

         struct device *dmadev =&dev->pci_dev->dev;

         dev->prp_page_pool =dma_pool_create("prp list page", dmadev,

                                                        PAGE_SIZE,PAGE_SIZE, 0);

         if (!dev->prp_page_pool)

                   return -ENOMEM;

 

         /* Optimisation for I/Os between 4k and128k */

         dev->prp_small_pool =dma_pool_create("prp list 256", dmadev,

                                                        256,256, 0);

         if (!dev->prp_small_pool) {

                   dma_pool_destroy(dev->prp_page_pool);

                   return -ENOMEM;

         }

         return 0;

}

 

下面是一个重量级的函数之一,nvme_dev_start;

staticint nvme_dev_start(struct nvme_dev *dev)

{

         int result;

         bool start_thread = false;

 

         result = nvme_dev_map(dev);

         if (result)

                   return result;

 

         result = nvme_configure_admin_queue(dev);//配置adminsubmit queue 和complete queue,64 depth

         if (result)

                   goto unmap;

 

         spin_lock(&dev_list_lock);

         if (list_empty(&dev_list)&& IS_ERR_OR_NULL(nvme_thread)) {

                   start_thread = true;

                   nvme_thread = NULL;

         }

         list_add(&dev->node,&dev_list);

         spin_unlock(&dev_list_lock);

 

         if (start_thread) {

                   nvme_thread = kthread_run(nvme_kthread, NULL,"nvme");

                   wake_up_all(&nvme_kthread_wait);

         } else

                   wait_event_killable(nvme_kthread_wait,nvme_thread);

 

         if (IS_ERR_OR_NULL(nvme_thread)) {

                   result = nvme_thread ?PTR_ERR(nvme_thread) : -EINTR;

                   goto disable;

         }

 

         nvme_init_queue(dev->queues[0], 0);//始化queue,并online_queues++

         result = nvme_alloc_admin_tags(dev);

         if (result)

                   goto disable;

 

         result = nvme_setup_io_queues(dev);

         if (result)

                   goto free_tags;

 

         nvme_set_irq_hints(dev);

 

         return result;

 

 free_tags:

         nvme_dev_remove_admin(dev);

 disable:

         nvme_disable_queue(dev, 0);

         nvme_dev_list_remove(dev);

 unmap:

         nvme_dev_unmap(dev);

         return result;

}

首先看nvme_configure_admin_queue(dev) 这个函数。

staticint nvme_configure_admin_queue(struct nvme_dev *dev)

{

         int result;

         u32 aqa;

         u64 cap =readq(&dev->bar->cap);//读cap寄存器

         struct nvme_queue *nvmeq;

         unsigned page_shift = PAGE_SHIFT;

         unsigned dev_page_min =NVME_CAP_MPSMIN(cap) + 12;

         unsigned dev_page_max =NVME_CAP_MPSMAX(cap) + 12;

 

         if (page_shift < dev_page_min) {

                   dev_err(&dev->pci_dev->dev,

                                     "Minimumdevice page size (%u) too large for "

                                     "host(%u)\n", 1 << dev_page_min,

                                     1 <<page_shift);

                   return -ENODEV;

         }

         if (page_shift > dev_page_max) {

                   dev_info(&dev->pci_dev->dev,

                                     "Devicemaximum page size (%u) smaller than "

                                     "host(%u); enabling work-around\n",

                                     1 <<dev_page_max, 1 << page_shift);

                   page_shift = dev_page_max;

         }

 

         result = nvme_disable_ctrl(dev, cap);//disablecontroller

         if (result < 0)

                   return result;

 

         nvmeq = dev->queues[0];

         if (!nvmeq) {

                   nvmeq = nvme_alloc_queue(dev, 0,NVME_AQ_DEPTH);//如果nvmeq==null,就创建nvmeq

                   if (!nvmeq)

                            return -ENOMEM;

         }

 

         aqa = nvmeq->q_depth - 1;

         aqa |= aqa << 16;

 

         dev->page_size = 1 <<page_shift;

 

         dev->ctrl_config = NVME_CC_CSS_NVM;

         dev->ctrl_config |= (page_shift -12) << NVME_CC_MPS_SHIFT;

         dev->ctrl_config |= NVME_CC_ARB_RR |NVME_CC_SHN_NONE;

         dev->ctrl_config |= NVME_CC_IOSQES |NVME_CC_IOCQES;

 

         writel(aqa, &dev->bar->aqa);

         writeq(nvmeq->sq_dma_addr,&dev->bar->asq);

         writeq(nvmeq->cq_dma_addr,&dev->bar->acq); //该语句是创建nvmeqsubmit queuecomplete queue

 

         result = nvme_enable_ctrl(dev, cap);

         if (result)

                   goto free_nvmeq;

 

         nvmeq->cq_vector = 0;

         result = queue_request_irq(dev, nvmeq,nvmeq->irqname);//注册中断

         if (result)

                   goto free_nvmeq;

 

         return result;

 

 free_nvmeq:

         nvme_free_queues(dev, 0);

         return result;

}

下面看一下在nvme_alloc_queue函数中作了什么。

staticstruct nvme_queue *nvme_alloc_queue(structnvme_dev *dev, int qid,

                                                                 intdepth)

{

         struct device *dmadev =&dev->pci_dev->dev;

         struct nvme_queue *nvmeq =kzalloc(sizeof(*nvmeq), GFP_KERNEL);

         if (!nvmeq)

                   return NULL;

 

         nvmeq->cqes =dma_zalloc_coherent(dmadev, CQ_SIZE(depth),

                                                 &nvmeq->cq_dma_addr, GFP_KERNEL); //分配complete queue cmds空间,深度为depth个。

         if (!nvmeq->cqes)

                   goto free_nvmeq;

 

         nvmeq->sq_cmds =dma_alloc_coherent(dmadev, SQ_SIZE(depth),

                                               &nvmeq->sq_dma_addr,GFP_KERNEL);//分配submit queuecmds空间,深度为depth个。

         if (!nvmeq->sq_cmds)

                   goto free_cqdma;

 

         nvmeq->q_dmadev = dmadev;

         nvmeq->dev = dev;

         snprintf(nvmeq->irqname,sizeof(nvmeq->irqname), "nvme%dq%d",

                            dev->instance,qid);//设置nvmeqirqname

         spin_lock_init(&nvmeq->q_lock);

         nvmeq->cq_head = 0;

         nvmeq->cq_phase = 1;

         nvmeq->q_db = &dev->dbs[qid *2 * dev->db_stride];

         nvmeq->q_depth = depth;

         nvmeq->qid = qid;

         dev->queue_count++;

         dev->queues[qid] = nvmeq;//将分配的nvmeq保存在dev->queues[qid]位置

 

         return nvmeq;//返回得到的nvmeq

 

 free_cqdma:

         dma_free_coherent(dmadev,CQ_SIZE(depth), (void *)nvmeq->cqes,

                                                                 nvmeq->cq_dma_addr);

 free_nvmeq:

         kfree(nvmeq);

         return NULL;

}

 

到此,我们完成了admin queue的complete queue和submit queue的创建和中断的注册。下面一句是nvme_kthread 守护进程的创建,这个我们稍候再讲。我们先看一下下面的函数。

/*
 * (Re)initialize a queue's head/tail/phase state, zero its completion
 * entries and mark one more queue online.
 */
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	spin_lock_irq(&nvmeq->q_lock);
	nvmeq->sq_tail = 0;	/* reset queue positions */
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;	/* one more queue is now online */
	spin_unlock_irq(&nvmeq->q_lock);
}

 

下面的函数是nvme使用blk-mq的核心。

staticint nvme_alloc_admin_tags(structnvme_dev *dev)

{

         if (!dev->admin_q) {//初始化admin_qnull,故进入if分支

                   dev->admin_tagset.ops =&nvme_mq_admin_ops;//初始化blk_mq_tag_set结构体,nvme_mq_admin_opsrun request会用到

                   dev->admin_tagset.nr_hw_queues= 1;//hardware queue个数为1

                   dev->admin_tagset.queue_depth= NVME_AQ_DEPTH - 1;

                   dev->admin_tagset.timeout= ADMIN_TIMEOUT;

                   dev->admin_tagset.numa_node= dev_to_node(&dev->pci_dev->dev);

                   dev->admin_tagset.cmd_size= sizeof(struct nvme_cmd_info);

                   dev->admin_tagset.driver_data= dev;

 

                   if (blk_mq_alloc_tag_set(&dev->admin_tagset))//分配一个tag set与一个或多个request queues关联。

                            return -ENOMEM;

 

                   dev->admin_q =blk_mq_init_queue(&dev->admin_tagset);

                   if (IS_ERR(dev->admin_q)){

                            blk_mq_free_tag_set(&dev->admin_tagset);

                            return -ENOMEM;

                   }

                   if (!blk_get_queue(dev->admin_q)){

                            nvme_dev_remove_admin(dev);

                            return -ENODEV;

                   }

         } else

                   blk_mq_unfreeze_queue(dev->admin_q);

 

         return 0;

}

下面依次介绍blk_mq中相关的函数。

/*

 * Alloc a tag set to be associated with one ormore request queues.

 * May fail with EINVAL for various errorconditions. May adjust the

 * requested depth down, if if it too large. Inthat case, the set

 * value will be stored in set->queue_depth.

 */

int blk_mq_alloc_tag_set(structblk_mq_tag_set *set)

{

         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1<< BLK_MQ_UNIQUE_TAG_BITS);

 

         if (!set->nr_hw_queues)

                   return -EINVAL;

         if (!set->queue_depth)

                   return -EINVAL;

         if (set->queue_depth <set->reserved_tags + BLK_MQ_TAG_MIN)

                   return -EINVAL;

 

         if (!set->nr_hw_queues ||!set->ops->queue_rq || !set->ops->map_queue)

                   return -EINVAL;

 

         if (set->queue_depth >BLK_MQ_MAX_DEPTH) {

                   pr_info("blk-mq: reducedtag depth to %u\n",

                            BLK_MQ_MAX_DEPTH);

                   set->queue_depth =BLK_MQ_MAX_DEPTH;

         }

 

         /*

          * If a crashdump is active, then we arepotentially in a very

          * memory constrained environment. Limit us to1 queue and

          * 64 tags to prevent using too much memory.

          */

         if (is_kdump_kernel()) {

                   set->nr_hw_queues = 1;

                   set->queue_depth =min(64U, set->queue_depth);

         }

 

         set->tags =kmalloc_node(set->nr_hw_queues *   //在这里给tags分配与nr_hw_queues个空间

                                      sizeof(struct blk_mq_tags *),

                                      GFP_KERNEL, set->numa_node);

         if (!set->tags)

                   return -ENOMEM;

 

         if (blk_mq_alloc_rq_maps(set))

                   goto enomem;

 

         mutex_init(&set->tag_list_lock);

         INIT_LIST_HEAD(&set->tag_list);

 

         return 0;

enomem:

         kfree(set->tags);

         set->tags = NULL;

         return -ENOMEM;

}

 

/*

 * Allocate the request maps associated withthis tag_set. Note that this

 * may reduce the depth asked for, if memory istight. set->queue_depth

 * will be updated to reflect the allocateddepth.

 */

staticint blk_mq_alloc_rq_maps(structblk_mq_tag_set *set)

{

         unsigned int depth;

         int err;

 

         depth = set->queue_depth;

         do {

                   err =__blk_mq_alloc_rq_maps(set);//如果成功,则跳出,否则,将queue_depth减半,创建

                   if (!err)

                            break;

 

                   set->queue_depth >>=1;

                   if (set->queue_depth <set->reserved_tags + BLK_MQ_TAG_MIN) {

                            err = -ENOMEM;

                            break;

                   }

         } while (set->queue_depth);

 

         if (!set->queue_depth || err) {

                   pr_err("blk-mq: failedto allocate request map\n");

                   return -ENOMEM;

         }

 

         if (depth != set->queue_depth)

                   pr_info("blk-mq: reducedtag depth (%u -> %u)\n",

                                                        depth,set->queue_depth);

 

         return 0;

}


未完待续。。。。

版权声明:本文为博主原创文章,未经博主允许不得转载。

nvme 驱动详解 之1

标签:linux   driver   

原文地址:http://blog.csdn.net/qqqqqq999999/article/details/47732319

(0)
(0)
   
举报
评论 一句话评论(0
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!