如何使用CUDA编程来计算和处理正确的数字 [英] How to use CUDA programming to calculate and process the correct number

查看:138
本文介绍了如何使用CUDA编程来计算和处理正确的数字的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

带宽测试 - 测试内存带宽。



对PCIE功能尤为重要。不同的MB具有不同的PCIE功能。



CUDA适配器性能取决于PCIE的能力。这可能是性能瓶颈。



在下面的编程演练中,必须计算所需的时钟周期数和所利用的内存带宽。



(1)程序中的并行化 - 使用256个线程



(2)改进内存访问模式



(3)使用512/1024测试并行化



(4)在计算中使用BLOCKS


(5)利用共享内存



(6)使用Treesum算法改善计算性能



(7)解决在共享内存中应用Treesum算法时遇到的内存库(bank)冲突问题



我尝试了什么:



我自己的编码样本有一些错误 -







Bandwidth test - test memory bandwidth.

Especially important for PCIE capability. Different MB has different PCIE capability.

The CUDA adaptor performance is depend on the capability of PCIE. It could be the performance bottleneck.

On the following programming drills, the number of clock cycles necessary for the computation and the utilised memory bandwidth have to be computed.

(1) parallelization in the programs - using 256 threads

(2) improving the memory access modes

(3) testing the parallelization by using 512/1024

(4) utilizing BLOCKS in the computation

(5) utilizing shared memory

(6) improving the computation performance by using a Treesum algorithm

(7) resolving the memory bank conflict issue, encountered in applying the Treesum algorithm with the shared memory

What I have tried:

My own coding sample however have some errors -



#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>

int main()
{
	float *a, *b, *c, *d;
	int n = 1000;

	if (!InitCUDA())
		return 0;
	a = (float*)malloc(sizeof(float)* n * n);
	b = (float*)malloc(sizeof(float)* n * n);
	c = (float*)malloc(sizeof(float)* n * n);
	d = (float*)malloc(sizeof(float)* n * n);

	srand(0);
	matgen(a, n, n);
	matgen(b, n, n);

	clock_t time = matmultCUDA(a, n, b, n, c, n, n);
	matmult(a, n, b, n, d, n, n);
	compare_mat(c, n, d, n, n);

	double sec = (double)time / CLOCKS_PER_SEC;

	printf("Time used: %.2f (%.2lf GFLOPS)\n", sec, 2.0 * n * n * n / (sec * 1E9));

	return 0;
}
void matgen(float* a, int lda, int n)
{
	int i, j;
	for (i = 0; i < n; i++)
	{
		for (j = 0; j < n; j++)
		{
			a[i* lda + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX * RAND_MAX);
		}
	}
}

void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
	int i, j, k;

	for (i = 0; i< n; i++)
	{
		for (j = 0; j < n; j++)
		{
			double t = 0;
			for (k = 0; k < n; k++) {
				t += a[i* lda + k] * b[k * ldb + j];
			}
			c[i* ldc + j] = t;
		}
	}

	void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
	{
		float max_err = 0;
		float average_err = 0; inti, j;
		for (i = 0; i< n; i++)
		{
			for (j = 0; j < n; j++)
			{
				if (b[i* ldb + j] != 0)
				{
					float err = fabs((a[i* lda + j] - b[i* ldb + j]) / b[i* ldb + j]);
					if (max_err< err) max_err = err;
					average_err += err;
				}
			}
		}
		printf("Max error: %g Average error:%g\n", max_err, average_err / (n * n));
	}

#define NUM_THREADS 256

clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
	float *ac, *bc, *cc;
	clock_tstart, end;

	start = clock();
	cudaMalloc((void**)&ac, sizeof(float)* n * n);
	cudaMalloc((void**)&bc, sizeof(float)* n * n);
	cudaMalloc((void**)&cc, sizeof(float)* n * n);

	cudaMemcpy2D(ac, sizeof(float)* n, a, sizeof(float)* lda, sizeof(float)* n, n, cudaMemcpyHostToDevice);
	cudaMemcpy2D(bc, sizeof(float)* n, b, sizeof(float)* ldb, sizeof(float)* n, n, cudaMemcpyHostToDevice);

	intblocks = (n + NUM_THREADS - 1) / NUM_THREADS;
	matMultCUDA<<<blocks * n, NUM_THREADS >>>(ac, n, bc, n, cc, n, n);

	cudaMemcpy2D(c, sizeof(float)* ldc, cc, sizeof(float)* n, sizeof(float)* n, n, cudaMemcpyDeviceToHost);

	cudaFree(ac);
	cudaFree(bc);
	cudaFree(cc);

	end = clock();
	return end - start;
}

__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
	__shared__ float
	matA[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float matB[BLOCK_SIZE][BLOCK_SIZE];
	constinttidc = threadIdx.x;
	constinttidr = threadIdx.y;
	constintbidc = blockIdx.x* BLOCK_SIZE;
	constintbidr = blockIdx.y* BLOCK_SIZE;
	int i, j;

	float results = 0;
	float comp = 0;

	for (j = 0; j < n; j += BLOCK_SIZE)
	{
		matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
		matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];

		__syncthreads();

		for (i = 0; i< BLOCK_SIZE; i++)
		{
			float t; comp -= matA[tidr][i] * matB[i][tidc];
			t = results - comp; comp = (t - results) + comp; results = t;
		}

		__syncthreads();
	}

	c[(tidr + bidr) * ldc + tidc + bidc] = results;
}

推荐答案

最好告诉我错误,不是我告诉你所有答案
It is better you tell me about the errors not me tell you all the answers


这篇关于如何使用CUDA编程来计算和处理正确的数字的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆