-2

私は C と CUDA の両方が初めてで、内積関数を書いていましたが、正しい結果が得られません。親切な魂が私を見てくれますか?

また、2つ質問がありますが、

  1. dot() が正しく機能しない理由と、
  2. 57 行目で、product[index] ではなく product[threadIdx.x] になっているのはなぜですか? 書けないかな

    製品[インデックス] = a[インデックス] * b[インデックス]; ... if(index==0) {...} そして、このようにゼロ番目のスレッドで各要素を合計しますか?

どうもありがとう。

デバイスクエリ:

  Device 0: "GeForce GTX 570"
  CUDA Driver Version / Runtime Version          6.0 / 5.5
  CUDA Capability Major/Minor version number:    2.0

Makefile: nvcc -arch=sm_20 cuda_test.cu -o cuda_test

cuda_test.cu で:

#include <stdio.h> // printf, scanf, NULL etc.
#include <stdlib.h> // malloc, free, rand etc.

#define N (3) //Number of threads we are using (also, length of array declared in main)

#define THREADS_PER_BLOCK (1) //Threads per block we are using

#define N_BLOCKS (N/THREADS_PER_BLOCK)

/* Function to generate a random integer between 1-10 */
void random_ints (int *a, int n)
{
    int i;
    srand(time(NULL)); //Seed rand() with current time
    for(i=0; i<n; i++)
    { 
        a[i] = rand()%10 + 1; 
    }
    return;
}

/* Kernel that adds two integers a & b, stores result in c */
__global__ void add(int *a, int *b, int *c) {
//global indicates function that runs on 
//device (GPU) and is called from host (CPU) code

    int index = threadIdx.x + blockIdx.x * blockDim.x;

    //threadIdx.x : thread index
    //blockIdx.x  : block index
    //blockDim.x  : threads per block
    //hence index is a thread counter across all blocks
    c[index] = a[index] + b[index];

//note that pointers are used for variables
//add() runs on device, so they must point to device memory
//need to allocate memory on GPU
}

/* Kernel for dot product */
__global__ void dot(int *a, int *b, int *c)
{
    __shared__ int product[THREADS_PER_BLOCK]; //All threads in a block must be able 
                                               //to access this array

    int index = threadIdx.x + blockIdx.x * blockDim.x; //index

    product[threadIdx.x] = a[index] * b[index]; //result of elementwise
                                                //multiplication goes into product

    //Make sure every thread has finished
    __syncthreads();

    //Sum the elements serially to obtain dot product
    if( 0 == threadIdx.x ) //Pick one thread to sum, otherwise all will execute
    {
        int sum = 0;
        for(int j=0; j < THREADS_PER_BLOCK; j++) sum += product[j];
        //Done!
        atomicAdd(c,sum);
    }
}

int main(void)
{

    int *a, *b, *c, *dotProduct; //host copies of a,b,c etc
    int *d_a, *d_b, *d_c, *d_dotProduct; //device copies of a,b,c etc

    int size = N * sizeof(int); //size of memory that needs to be allocated

    int i=0; //iterator

    //Allocate space for device copies of a,b,c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    //Setup input values
    a = (int *)malloc(size); random_ints(a,N);
    b = (int *)malloc(size); random_ints(b,N);
    c = (int *)malloc(size);

    //Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    //Launch add() kernel on GPU
    add<<<N_BLOCKS,THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    // triple angle brackets mark call from host to device
    // this is also known as a kernel launch
    // N/THREADS_PER_BLOCK = NO. OF BLOCKS

    //Copy result back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    //Output results
    printf("a = {");
    for (i=0; i<N; i++) printf(" %d",a[i]);
    printf(" }\n");

    printf("b = {");
    for (i=0; i<N; i++) printf(" %d",b[i]);
    printf(" }\n");

    printf("c = {");
    for (i=0; i<N; i++) printf(" %d",c[i]);
    printf(" }\n");

    //Calculate dot product of a & b
    dotProduct = (int *)malloc(sizeof(int)); //Allocate host memory to dotProduct
    *dotProduct = 0; //initialise to zero
    cudaMalloc((void **)&d_dotProduct, sizeof(int)); //Allocate device memory to d_dotProduct
    dot<<<N_BLOCKS,THREADS_PER_BLOCK>>>(d_a, d_b, d_dotProduct); //Perform calculation
    cudaMemcpy(dotProduct, d_dotProduct, sizeof(int), cudaMemcpyDeviceToHost); //Copy result into dotProduct
    printf("\ndot(a,b) = %d\n", *dotProduct); //Output result

    //Cleanup
    free(a); free(b); free(c); free(dotProduct);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_dotProduct);

    return 0;
} //End of main
4

2 に答える 2

2

talonmies が言うように、他の誰かがあなたのコードを実行できるようにしてください。行番号の埋め込みは役に立ちません。

他の情報がない場合の最良の推測は、d_dotProductゼロに初期化していないことです。これを行うことができますcudaMemset()- 別の初期値が必要な場合はcudaMemcpy()、ホストから初期値を取得するか、別のカーネルを起動して初期化できますが、この場合cudaMemset()(ホスト上と同等memset()) で十分です。

N_BLOCKS*THREADS_PER_BLOCKがと等しくない場合もありますsize

2番目の質問についてproductは、 size のブロックごとの配列です。THREADS_PER_BLOCKこれにアクセスするとproduct[index]範囲外になります。

于 2014-01-03T21:38:50.570 に答える
-2

問題が解決しました!「product」配列の個々の要素を合計する前に「*c = 0」を設定する必要がありました。

/* Kernel for dot product */
__global__ void dot(int *a, int *b, int *c)
{
    __shared__ int product[THREADS_PER_BLOCK]; //All threads in a block must be able 
                                               //to access this array

    int index = threadIdx.x + blockIdx.x * blockDim.x; //index

    product[threadIdx.x] = a[index] * b[index]; //result of elementwise
                                                //multiplication goes into product

    if(index==0) *c = 0; //Ask one thread to set c to zero.

    //Make sure every thread has finished
    __syncthreads();    

    //Sum the elements serially to obtain dot product
    if( 0 == threadIdx.x ) //Every block to do c += sum
    {
        int sum = 0;
        for(int j=0; j < THREADS_PER_BLOCK; j++) sum += product[j];
        //Done!
        atomicAdd(c,sum);
    }
}
于 2014-01-04T23:25:52.063 に答える