0

我目前正在尝试学习如何使用 CUB 来重写我的积分器代码。我一直在查看文档中的示例和代码片段,但我还没有找到我正在尝试做的示例。具体来说,就是运行从主线程调用的 InclusiveSum。从我所见,所有示例都从主机而不是设备调用函数,但文档提示可以这样做:http://nvlabs.github.io/cub/structcub_1_1_device_scan.html#a7bcc25e4d9c14a23f71431cf1a6b2bd5

“从内核代码调用此方法时,请务必在编译器的宏定义中定义 CUB_CDP 宏。”

我已经尝试在 Visual Studio 2012 中添加它,方法是转到我的项目的属性->Cuda 链接器-> 命令行并添加“-DCUB_CDP”。我不确定这是否正确,但我得到以下构建行:

"nvcc.exe" -gencode=arch=compute_35,code=\"sm_35,compute_35\" --use-local-env --cl-version 2012 -ccbin "C:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\bin\x86_amd64" -rdc=true -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.0\include" -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v6.0\include" -G --keep-dir x64\Debug -maxrregcount=0 --machine 64 --compile -cudart static -DCUB_CDP -g -D_MBCS -Xcompiler "/EHsc /W3 /nologo /Od /Zi /RTC1 /MT" -o "x64\Debug\Algorithm Test.cu.obj" "C:\Users...\Algorithm Test.cu"

我的测试代码涉及一个使用 1 个线程运行的测试内核来模拟我的实际代码的工作方式。

// Enable CUB's error reporting to stderr (must be defined before including <cub/cub.cuh>).
#define CUB_STDERR
// Wraps a CUDA runtime call so failures are reported with source file/line (see gpuAssert).
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define NUMPOINTS 5*1024    
#define NUMTHREADSPERBLOCK 256
#define NUMBLOCKSPERGRID 32
// Total number of tracked concentrations; product of the two constants above.
#define MAXLENGTH NUMTHREADSPERBLOCK*NUMBLOCKSPERGRID   //Use multiple of 256

#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <fstream>
#include <iomanip>                      //display 2 decimal places
#include <math.h>
#include <arrayFunctions.h>
#include <ctime>                        //For timers
#include <sstream>                      //For the filename
#include <assert.h>
#include <stdlib.h>
#include <cub/cub.cuh>


// Device-side assert() requires compute capability 2.0+; compile it out on older targets.
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#undef  assert
#define assert(arg)
#endif

// Launch-configuration constants mirrored into device constant memory.
__device__ __constant__ int numThreads = NUMTHREADSPERBLOCK;    //Number of threads per block
__device__ __constant__ int numBlocks = NUMBLOCKSPERGRID;       //Number of blocks per grid
__device__ __constant__ int maxlength = MAXLENGTH;  
// Global device scratch array (unused in this minimal test; kept for the real integrator).
__device__ double concSort[MAXLENGTH];

// Reports a CUDA runtime error with its source location and (by default) aborts.
// Invoked through the gpuErrchk() macro, which supplies __FILE__ / __LINE__.
//   code  - status returned by a CUDA runtime call
//   file  - source file of the failing call (a string literal, hence const char*)
//   line  - source line of the failing call
//   abort - when true (default), terminate the process with the error code
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    //Error checking
    if (code != cudaSuccess) 
    {
        // cudaGetErrorString() maps the status code to a human-readable message.
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}


using namespace std;
using namespace cub;

// Forward declaration: single-thread test kernel that runs a device-side CUB scan.
__global__ void test(double*);

// Host driver: allocates and zero-initializes the concentration buffer on the
// device, launches the single-thread test kernel (which performs the CUB scan
// via dynamic parallelism), times the run, and prints the first 33 results.
int main(int argc, char** argv)
{
    gpuErrchk(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
    gpuErrchk(cudaSetDevice(0));
    std::cout << std::fixed;                // Fixed-point notation...
    std::cout << std::setprecision(16);     // ...with 16 digits after the decimal point.

    const int maxlength = MAXLENGTH;        // Number of discrete concentrations tracked.
    double concs[maxlength] = {};           // Host buffer, zero-initialized (64 KB on the stack).

    std::cout<<" ";
    std::cout<<"\n";    

    double *d_concs;                        // Device copy of the concentrations.
    size_t size_concs = sizeof(concs);

    gpuErrchk(cudaMalloc((void**)&d_concs, size_concs));
    gpuErrchk(cudaMemcpy(d_concs, concs, size_concs, cudaMemcpyHostToDevice));

    //Run the integrator and time it with a host wall-clock timer.
    std::clock_t start = std::clock();

    test<<<1,1>>>(d_concs);

    std::cout<<"\n";

    gpuErrchk( cudaPeekAtLastError() );     // Catch launch-configuration errors.
    gpuErrchk( cudaDeviceSynchronize() );   // Wait for the kernel; surface async errors.
    double duration = (std::clock() - start) / (double) CLOCKS_PER_SEC;
    std::cout<<"The calculation took this long: "<< duration <<'\n';

    std::cout<<"\n";

    // Blocking copy: also synchronizes, so no extra cudaDeviceSynchronize() is needed.
    gpuErrchk(cudaMemcpy(concs, d_concs, size_concs, cudaMemcpyDeviceToHost));

    // Print the first 33 partial sums (spans the first block boundary at 32).
    for (int i=0; i < 33; i++)
    {
        std::cout << "\n";
        std::cout << concs[i];
    }

    gpuErrchk(cudaFree(d_concs));   // Release the device buffer explicitly.
    cudaDeviceReset();              // Clean up all remaining device state.
    return 0;
}



// Device-side test kernel, intended to be launched <<<1,1>>>: fills concs with
// a small ramp, then runs an in-place cub::DeviceScan::InclusiveSum entirely
// from device code. Requires dynamic parallelism: compile with -rdc=true for
// sm_35+ and define CUB_CDP so CUB emits device-callable entry points.
__global__ void test(double* concs)
{
    const int size = MAXLENGTH;

    // Ramp input: concs[i] = i * 1e-11.
    for (int i = 0; i < size; i++)
        concs[i] = i * .00000000001;

    // Standard two-phase CUB idiom: the first call (null storage pointer) only
    // reports the required scratch size; the second call performs the scan.
    // In-place operation (d_in == d_out) is supported by DeviceScan.
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    CubDebug(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, concs, concs, size));
    CubDebug(cudaMalloc(&d_temp_storage, temp_storage_bytes));
    CubDebug(cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, concs, concs, size));
    CubDebug(cudaFree(d_temp_storage));   // Free the device-heap scratch buffer (was leaked).
}

我收到以下错误;根据下面链接的帖子,这表明问题出在我定义 CUB_CDP 宏的方式上:

1>C:/Users/Karsten Chu/New Google Drive/Research/Visual Studio 2012/Projects/Dynamic Parallelism Test/Dynamic Parallelism Test/Algorithm Test.cu(146): error : calling a __host__ function("exit") from a __global__ function("test") is not allowed
1>C:/Users/Karsten Chu/New Google Drive/Research/Visual Studio 2012/Projects/Dynamic Parallelism Test/Dynamic Parallelism Test/Algorithm Test.cu(148): error : calling a __host__ function("exit") from a __global__ function("test") is not allowed

https://groups.google.com/forum/#!searchin/cub-users/CUB_CDP/cub-users/9ltP52Ohosg/uM9_RUy11e0J

我会很感激任何帮助,因为我认为学习如何使用这个库真的可以帮助我开始专注于物理,而不是……除了物理之外的任何东西。

4

1 回答 1

2

从测试内核中的 cub 调用中删除CubDebugExit()包装器。然后你的代码将编译。

而不是这个:

CubDebugExit(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, concs, concSort, maxlength));
cudaMalloc(&d_temp_storage, temp_storage_bytes);
CubDebugExit(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, concs, concSort, maxlength));

做这个:

cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, concs, concSort, maxlength);
cudaMalloc(&d_temp_storage, temp_storage_bytes);
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, concs, concSort, maxlength);

CubDebugExit在设备代码中不可用。

如果您愿意,也可以改用包装器/宏 CubDebug() 来代替 CubDebugExit()。

于 2014-08-18T01:47:51.327 回答