Cuder - A CUDA Wrapper Class in C++11

When I used to write CUDA, every program went through the same motions: initialize the environment, allocate device memory, initialize it, launch the kernel, copy the data back, free the device memory. Most of a page of code was taken up by these tedious but necessary steps, and sometimes I would still forget to free part of the device memory.

Today I wrapped these CUDA operations in a C++11 class, so I can now focus on writing the kernel code. The .cu file ends up as clean and concise as a GLSL shader file.

For example, the file ./kernel.cu below contains only a single fill function that fills the array A. The kernel uses a grid-stride loop, so it stays correct for any grid/block configuration (with the 512×256-thread launch used below, each thread writes at most one of the 65536 elements).

extern "C"  __global__ void fill(int * A, int cnt){
    const int gap = blockDim.x*gridDim.x;
    for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < cnt; id += gap)
        A[id] = id * 2;
};

The following main.cpp demonstrates how the Cuder class is used:

#include "Cuder.h"
const int N = 65536;
int main(){
    int A[N];  for (int i = 0; i < N; ++i) A[i] = i;
    
    //CUcontexts must not be created freely: the constructor is private, and for safety the copy constructor and copy assignment operator are deleted
    redips::Cuder cuder = redips::Cuder::getInstance();
    //add and compile a .cu file, which plays the role of a GLSL shader file
    cuder.addModule("./kernel.cu");
    //allocate a device array of size [sizeof(int)*N] and register it under the name ["a_dev"], which identifies the array in later calls;
    //if the third parameter is not null, a cpu->gpu copy is performed as well
    cuder.applyArray("a_dev", sizeof(int)*N, A);
    //launch the ["fill"] kernel from ["./kernel.cu"]; the first two parameters set the grid size and block size
    //{ "a_dev", N } is a C++11 initializer_list: a string names a previously allocated device array, anything else is passed by value
    cuder.launch(dim3(512, 1, 1), dim3(256, 1, 1), "./kernel.cu", "fill", { "a_dev", N });
    //copy the device array ["a_dev"] back into [A]
    cuder.fetchArray("a_dev", sizeof(int)*N, A);
    
    return 0;
}

The .cu file does not need to take part in the project build; NVRTC compiles it to PTX at run time.
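As a further illustration of the parameter convention (the scale kernel and the factor value below are my own hypothetical example, not part of the original project), a kernel mixing a device array with plain scalars could be added to ./kernel.cu:

extern "C" __global__ void scale(int * A, int factor, int cnt){
    //hypothetical kernel: multiply every element of A by factor
    const int gap = blockDim.x*gridDim.x;
    for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < cnt; id += gap)
        A[id] *= factor;
}

It would be launched the same way as fill; the string "a_dev" names the device array, while the two integers are passed by value:

cuder.launch(dim3(512, 1, 1), dim3(256, 1, 1), "./kernel.cu", "scale", { "a_dev", 3, N });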

Here is the code of Cuder.h:

#pragma once
#include <map>
#include <string>
#include <vector>
#include <cuda.h>
#include <nvrtc.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include <cudaProfiler.h>
#include <cuda_runtime.h>
#include <helper_cuda_drvapi.h>

namespace redips{
    class Cuder{
        CUcontext context;
        std::map <std::string, CUmodule> modules;
        std::map <std::string, CUdeviceptr> devptrs;
        
        Cuder(){ 
            checkCudaErrors(cuCtxCreate(&context, 0, cuDevice)); 
        }
        void release(){
            for (auto module : modules)  cuModuleUnload(module.second);
            for (auto dptr : devptrs)    cuMemFree(dptr.second);
            devptrs.clear();
            modules.clear();
            //a moved-from Cuder holds a null context, so guard the destroy
            if (context) cuCtxDestroy(context);
        }
    public:
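        //ValueHolder lets the launch(...) parameter list mix named device arrays and plain values:
        //a string argument is later resolved to the CUdeviceptr registered under that name, while
        //any other type is copied to the heap so that its address can be handed to cuLaunchKernel.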
        class ValueHolder{
        public:
            void * value = nullptr;
            bool is_string = false;
            ValueHolder(const char* str){
                value = (void*)str;
                is_string = true;
            }
            template <typename T>
            ValueHolder(const T& data){
                value = new T(data);
            }
        };

        static Cuder getInstance(){
            if (!cuda_enviroment_initialized) initialize();
            return Cuder();
        }

        //forbidden copy-constructor and assignment function
        Cuder(const Cuder&) = delete;
        Cuder& operator= (const Cuder& another) = delete;

        Cuder(Cuder&& another){
            this->context = another.context;
            another.context = nullptr;
            this->devptrs = std::map<std::string, CUdeviceptr>(std::move(another.devptrs));
            this->modules = std::map<std::string, CUmodule>(std::move(another.modules));
        }
        Cuder& operator= (Cuder&& another) {
            if (this->context == another.context) return *this;
            release();
            this->context = another.context; 
            another.context = nullptr;
            this->devptrs = std::map<std::string, CUdeviceptr>(std::move(another.devptrs));
            this->modules = std::map<std::string, CUmodule>(std::move(another.modules));
            return *this;
        }
        
        virtual ~Cuder(){ release();    };
        
    public:
        bool launch(dim3 gridDim, dim3 blockDim, const char* module, const char* kernel_function, std::initializer_list<ValueHolder> params){
            //get kernel address
            if (!modules.count(module)){
                std::cerr << "[Cuder] : error: doesn‘t exists an module named " << module << std::endl; return false;
            }
            CUfunction kernel_addr;
            if (CUDA_SUCCESS != cuModuleGetFunction(&kernel_addr, modules[module], kernel_function)){
                std::cerr << "[Cuder] : error: doesn‘t exists an kernel named " << kernel_function << " in module " << module << std::endl; return false;
            }
            //setup params
            std::vector<void*> pamary;
            for (auto v : params){
                if (v.is_string){
                    if (devptrs.count((const char*)(v.value))) pamary.push_back((void*)(&(devptrs[(const char*)(v.value)])));
                    else{
                        std::cerr << "[Cuder] : error: launch failed. doesn‘t exists an array named " << (const char*)(v.value) << std::endl;;
                        return false;
                    }
                }
                else pamary.push_back(v.value);
            }

            cudaEvent_t start, stop;
            float elapsedTime = 0.0;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);

            bool result = (CUDA_SUCCESS == cuLaunchKernel(kernel_addr,/* grid dim */gridDim.x, gridDim.y, gridDim.z, /* block dim */blockDim.x, blockDim.y, blockDim.z, /* shared mem, stream */ 0, 0, /* kernel params */pamary.data(), /* extra */0));
            cuCtxSynchronize();

            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&elapsedTime, start, stop);
            std::cout << "[Cuder] : launch finish. cost " << elapsedTime << "ms" << std::endl;
            return result;
        }
        bool addModule(const char* cufile){
            if (modules.count(cufile)){
                std::cerr << "[Cuder] : error: already has an modules named " << cufile << std::endl;;
                return false;
            }
            std::string ptx = compileFile2PTX(cufile);
            
            if (ptx.length() > 0){
                CUmodule module;
                checkCudaErrors(cuModuleLoadDataEx(&module, ptx.c_str(), 0, 0, 0));
                modules[cufile] = module;
                return true;
            }
            else{
                std::cerr << "[Cuder] : error: add module " << cufile << " failed!\n";
                return false;
            }
        }
        void applyArray(const char* name, size_t size, void* h_ptr=nullptr){
            if (devptrs.count(name)){
                std::cerr << "[Cuder] : error: already has an array named " << name << std::endl;;
                return;
            }
            CUdeviceptr d_ptr;
            checkCudaErrors(cuMemAlloc(&d_ptr, size));
            if (h_ptr) 
                checkCudaErrors(cuMemcpyHtoD(d_ptr, h_ptr, size));
            devptrs[name] = d_ptr;
        }
        void fetchArray(const char* name, size_t size,void * h_ptr){
            if (!devptrs.count(name)){
                std::cerr << "[Cuder] : error: doesn‘t exists an array named " << name << std::endl;;
                return;
            }
            checkCudaErrors(cuMemcpyDtoH(h_ptr, devptrs[name], size));
        }
        
    private:
        static int devID;
        static CUdevice cuDevice;
        static bool cuda_enviroment_initialized;
        static void initialize(){
            //cuInit must be called before any other driver API call
            checkCudaErrors(cuInit(0));
            // picks the best CUDA device [with highest Gflops/s] available
            devID = gpuGetMaxGflopsDeviceIdDRV();
            checkCudaErrors(cuDeviceGet(&cuDevice, devID));
            // print device information
            {
                char name[100]; int major = 0, minor = 0;
                checkCudaErrors(cuDeviceGetName(name, 100, cuDevice));
                checkCudaErrors(cuDeviceComputeCapability(&major, &minor, cuDevice));
                printf("[Cuder] : Using CUDA Device [%d]: %s, %d.%d compute capability\n", devID, name, major, minor);
            }
            cuda_enviroment_initialized = true;
        }
        
        std::string compileFile2PTX(const char *filename){
            std::ifstream inputFile(filename, std::ios::in | std::ios::binary | std::ios::ate);
            if (!inputFile.is_open()) {
                std::cerr << "[Cuder] : error: unable to open " << filename << " for reading!\n";
                return "";
            }

            std::streampos pos = inputFile.tellg();
            size_t inputSize = (size_t)pos;
            char * memBlock = new char[inputSize + 1];

            inputFile.seekg(0, std::ios::beg);
            inputFile.read(memBlock, inputSize);
            inputFile.close();
            memBlock[inputSize] = '\0';

            // compile with nvrtc
            nvrtcProgram prog;
            if (nvrtcCreateProgram(&prog, memBlock, filename, 0, NULL, NULL) != NVRTC_SUCCESS){
                delete[] memBlock;
                return "";
            }
            delete[] memBlock;    //nvrtc keeps its own copy of the source

            bool compiled = (nvrtcCompileProgram(prog, 0, nullptr) == NVRTC_SUCCESS);

            // dump the compilation log, if any
            size_t logSize = 0;
            nvrtcGetProgramLogSize(prog, &logSize);
            if (logSize > 1){
                char *log = new char[logSize + 1];
                nvrtcGetProgramLog(prog, log);
                log[logSize] = '\0';
                std::cout << "[Cuder] : compile [" << filename << "] " << log << std::endl;
                delete[] log;
            }
            else std::cout << "[Cuder] : compile [" << filename << "] finish" << std::endl;

            // fetch the PTX produced by nvrtc
            std::string ptx = "";
            if (compiled){
                size_t ptxSize = 0;
                nvrtcGetPTXSize(prog, &ptxSize);
                char *buffer = new char[ptxSize + 1];
                nvrtcGetPTX(prog, buffer);
                ptx = buffer;
                delete[] buffer;
            }
            nvrtcDestroyProgram(&prog);
            return ptx;
        }
    };
    bool Cuder::cuda_enviroment_initialized = false;
    int Cuder::devID = 0;
    CUdevice Cuder::cuDevice = 0;
};
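A note on how launch() hands arguments to the kernel: cuLaunchKernel expects kernelParams to be an array of pointers, one per kernel argument, each pointing at the storage that holds that argument's value. That is why named device arrays are pushed as &devptrs[name], and why ValueHolder copies plain values with new T(data) and keeps the resulting address. A minimal sketch of the same convention written directly against the driver API (kernel_addr, d_A and cnt are assumed to exist already, as they do inside launch()):

//sketch only: kernel_addr comes from cuModuleGetFunction, d_A is a CUdeviceptr, cnt an int
void* args[] = { &d_A, &cnt };        //one pointer per kernel argument, each pointing at its value
cuLaunchKernel(kernel_addr,
               512, 1, 1,             //grid dimensions
               256, 1, 1,             //block dimensions
               0, 0,                  //shared memory bytes, stream
               args,                  //kernelParams
               0);                    //extra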

Here are the settings needed in Visual Studio:

//additional include directories
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v7.5\common\inc
//additional library directories
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\lib\x64
//additional dependencies (linker input)
cuda.lib
cudart.lib
nvrtc.lib
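For reference, the equivalent command-line build on Linux might look like the following; the include and library paths are assumptions about a default CUDA 7.5 installation and are not taken from the original post (helper_cuda_drvapi.h lives in the CUDA samples' common/inc directory):

g++ -std=c++11 main.cpp -o cuder_demo -I/usr/local/cuda/include -I<path-to-cuda-samples>/common/inc -L/usr/local/cuda/lib64 -lcuda -lcudart -lnvrtc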

 


Original post: https://www.cnblogs.com/redips-l/p/8372795.html
