在自己的代码中无法使用带 CUDA 加速的 CHOLMOD [英] Cannot use CHOLMOD with CUDA acceleration in my own code
问题描述
我正在尝试使用带 CUDA 加速的 CHOLMOD（SuiteSparse 4.4.4）。我按照用户手册编译了它，并且可以成功运行 Demo 文件夹下的 gpu.sh，结果显示 GPU 承担了一部分计算工作。然而，当我尝试在自己的代码中使用 CHOLMOD 时，发现 GPU 调用次数始终为 0。我已经把 Common->useGPU 设为 1，环境变量 CHOLMOD_USE_GPU 也设为 1。我的 Makefile 如下所示，库路径都是正确的。有什么建议吗？
其实我应该说明，我只是在运行一个最简单的测试用例来求解线性方程组。
我试了 UF 稀疏矩阵集合（UF Sparse Matrix Collection）中的几个矩阵，但 nvprof 显示没有任何 CUDA 应用被剖析（profiled）。
我试过的一些矩阵：
bmw7st_1: http://www.cise.ufl.edu/research/sparse/matrices/GHS_psdef/bmw7st_1.html
nd6k: http://www.cise.ufl.edu/research/sparse/matrices/ND/nd6k.html
nd24k: http://www.cise.ufl.edu/research/sparse/matrices/ND/nd24k.html
代码：

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <sys/time.h>
#include "cholmod.h"

int main (void)
{
    struct timeval t1, t2;
    double elapsedTime;

    const char* matFile = "../bmw7st_1.mtx";
    FILE* fp = fopen(matFile, "r");
    assert(fp != NULL);

    cholmod_sparse *A ;
    cholmod_dense *x, *b;
    cholmod_factor *L ;

    cholmod_common* c = (cholmod_common*)malloc(sizeof(cholmod_common));
    cholmod_start (c) ;                 /* start CHOLMOD */
    c->useGPU = 1;
    c->supernodal = CHOLMOD_SUPERNODAL;

    A = cholmod_read_sparse (fp, c) ;   /* read in a matrix */
    cholmod_print_sparse (A, "A", c) ;  /* print the matrix */
    fclose(fp);

    if (A == NULL || A->stype == 0)     /* A must be symmetric */
    {
        cholmod_free_sparse (&A, c) ;
        cholmod_finish (c) ;
        return (0) ;
    }

    b = cholmod_ones (A->nrow, 1, A->xtype, c) ;  /* b = ones(n,1) */

    gettimeofday(&t1, NULL);
    L = cholmod_analyze (A, c) ;             /* analyze */
    cholmod_factorize (A, L, c) ;            /* factorize */
    x = cholmod_solve (CHOLMOD_A, L, b, c) ; /* solve Ax=b */
    gettimeofday(&t2, NULL);

    elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;
    elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("Time: %.4f ms\n", elapsedTime);

    cholmod_free_factor (&L, c) ;  /* free matrices */
    cholmod_free_sparse (&A, c) ;
    cholmod_free_dense (&x, c) ;
    cholmod_free_dense (&b, c) ;
    cholmod_finish (c) ;           /* finish CHOLMOD */
    return (0) ;
}
Makefile：

CC = gcc
CFLAGS = -g -Wall -O2 \
    -lrt -lgfortran \
    -gdwarf-2
LIBS = $(CHOLMOD)/Lib/libcholmod.a \
    $(AMD)/Lib/libamd.a \
    $(COLAMD)/Lib/libcolamd.a \
    $(LAPACK)/liblapack.a \
    $(OPENBLAS)/lib/libopenblas.so \
    $(XERBLA)/libcerbla.a \
    $(METIS)/libmetis.a \
    $(CAMD)/Lib/libcamd.a \
    $(CCOLAMD)/Lib/libccolamd.a \
    $(SUITESPARSE)/SuiteSparse_config/libsuitesparseconfig.a \
    $(CUDART_LIB) \
    $(CUBLAS_LIB)
HEADER_DIR = $(CHOLMOD)/Include
CONFIG_HEADER_DIR = $(SUITESPARSE)/SuiteSparse_config
OBJ_DIR = .
BIN_DIR = .
INCLUDES = -I$(HEADER_DIR) \
    -I$(CONFIG_HEADER_DIR)
SRCS = $(shell ls *.c)
OBJS = $(SRCS:.c=.o)
OBJS_BUILD = $(shell ls $(OBJ_DIR)/*.o)
APP = prog
RM = rm -f

all: $(APP)

$(APP): $(OBJS)
	$(CC) $(CFLAGS) -o $(BIN_DIR)/$(APP) $(OBJS_BUILD) $(LIBS)

%.o: %.c $(HEADER_DIR)/*.h $(CONFIG_HEADER_DIR)/*.h
	$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $(OBJ_DIR)/$@

clean:
	$(RM) $(OBJS_BUILD) $(APP)
解决方案：参考 SuiteSparse 4.4.4 自带的 CHOLMOD UserGuide.pdf 第 7 节（第 34 页）：
只有 CHOLMOD 的长整型（long integer）版本才能利用 GPU 加速。
长整型版本通过诸如 cholmod_l_start（而不是 cholmod_start）这样的 API 调用来区分。对你的程序做如下修改：
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <sys/time.h>
#include "cholmod.h"

int main (void)
{
    struct timeval t1, t2;
    double elapsedTime;

    const char* matFile = "../Matrix/nd6k/nd6k.mtx";
    FILE* fp = fopen(matFile, "r");
    assert(fp != NULL);

    cholmod_sparse *A ;
    cholmod_dense *x, *b;
    cholmod_factor *L ;

    cholmod_common* c = (cholmod_common*)malloc(sizeof(cholmod_common));
    cholmod_l_start (c) ;               /* start CHOLMOD */
    c->useGPU = 1;
    c->supernodal = CHOLMOD_SUPERNODAL;

    A = cholmod_l_read_sparse (fp, c) ;   /* read in a matrix */
    cholmod_l_print_sparse (A, "A", c) ;  /* print the matrix */
    fclose(fp);

    if (A == NULL || A->stype == 0)       /* A must be symmetric */
    {
        cholmod_l_free_sparse (&A, c) ;
        cholmod_l_finish (c) ;
        return (0) ;
    }

    b = cholmod_l_ones (A->nrow, 1, A->xtype, c) ;  /* b = ones(n,1) */

    gettimeofday(&t1, NULL);
    L = cholmod_l_analyze (A, c) ;             /* analyze */
    cholmod_l_factorize (A, L, c) ;            /* factorize */
    x = cholmod_l_solve (CHOLMOD_A, L, b, c) ; /* solve Ax=b */
    gettimeofday(&t2, NULL);

    elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;
    elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0;
    printf("Time: %.4f ms\n", elapsedTime);

    cholmod_l_gpu_stats(c);

    cholmod_l_free_factor (&L, c) ;  /* free matrices */
    cholmod_l_free_sparse (&A, c) ;
    cholmod_l_free_dense (&x, c) ;
    cholmod_l_free_dense (&b, c) ;
    cholmod_l_finish (c) ;           /* finish CHOLMOD */
    return (0) ;
}

我得到这样的输出：
$ ./prog
CHOLMOD sparse:  A:  18000-by-18000, nz 3457658, upper.  OK
Time: 14570.3950 ms
CHOLMOD GPU/CPU statistics:
SYRK  CPU calls   888 time 1.0637e-01
      GPU calls   213 time 8.9194e-02
GEMM  CPU calls   711 time 1.1511e-01
      GPU calls   213 time 1.9351e-03
POTRF CPU calls   217 time 3.2180e-02
      GPU calls     5 time 1.5788e-01
TRSM  CPU calls   217 time 6.0409e-01
      GPU calls     4 time 5.6943e-02
time in the BLAS: CPU 8.5774e-01 GPU 3.0595e-01 total: 1.1637e+00
assembly time 0.0000e+00    0.0000e+00
$
这表明正在使用 GPU。
I am trying to use
CHOLMOD
withCUDA
acceleration inSuiteSparse 4.4.4
. I compiled it according to the user guide and I could rungpu.sh
underDemo
folder successfully, which showed that the GPU was doing part of the work. However, when I tried to run my own code usingCHOLMOD
, I found that the number of GPU calls was always 0. I do setCommon->useGPU
to 1, and the environment variableCHOLMOD_USE_GPU
is also set to 1. My Makefile is like the following. The library paths are correct. Any suggestion for me?Actually I should have mentioned that I am just running a simplest test case to solve a linear system.
I tried several matrices from UF Sparse Matrix Collection, but
nvprof
showed that noCUDA
application was profiled.Some of the matrices I tried:
bmw7st_1: http://www.cise.ufl.edu/research/sparse/matrices/GHS_psdef/bmw7st_1.html
nd6k: http://www.cise.ufl.edu/research/sparse/matrices/ND/nd6k.html
nd24k: http://www.cise.ufl.edu/research/sparse/matrices/ND/nd24k.html
Code:
#include <stdio.h> #include <time.h> #include <unistd.h> #include <assert.h> #include <sys/time.h> #include "cholmod.h" int main (void) { struct timeval t1, t2; double elapsedTime; const char* matFile = "../bmw7st_1.mtx"; FILE* fp = fopen(matFile, "r"); assert(fp != NULL); cholmod_sparse *A ; cholmod_dense *x, *b; cholmod_factor *L ; cholmod_common* c = (cholmod_common*)malloc(sizeof(cholmod_common)); cholmod_start (c) ; /* start CHOLMOD */ c->useGPU = 1; c->supernodal = CHOLMOD_SUPERNODAL; A = cholmod_read_sparse (fp, c) ; /* read in a matrix */ cholmod_print_sparse (A, "A", c) ; /* print the matrix */ fclose(fp); if (A == NULL || A->stype == 0) /* A must be symmetric */ { cholmod_free_sparse (&A, c) ; cholmod_finish (c) ; return (0) ; } b = cholmod_ones (A->nrow, 1, A->xtype, c) ; /* b = ones(n,1) */ gettimeofday(&t1, NULL); L = cholmod_analyze (A, c) ; /* analyze */ cholmod_factorize (A, L, c) ; /* factorize */ x = cholmod_solve (CHOLMOD_A, L, b, c) ; /* solve Ax=b */ gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time: %.4f ms\n", elapsedTime); cholmod_free_factor (&L, c) ; /* free matrices */ cholmod_free_sparse (&A, c) ; cholmod_free_dense (&x, c) ; cholmod_free_dense (&b, c) ; cholmod_finish (c) ; /* finish CHOLMOD */ return (0) ; }
Makefile:
CC = gcc CFLAGS = -g -Wall -O2 \ -lrt -lgfortran \ -gdwarf-2 LIBS = $(CHOLMOD)/Lib/libcholmod.a \ $(AMD)/Lib/libamd.a \ $(COLAMD)/Lib/libcolamd.a \ $(LAPACK)/liblapack.a \ $(OPENBLAS)/lib/libopenblas.so \ $(XERBLA)/libcerbla.a \ $(METIS)/libmetis.a \ $(CAMD)/Lib/libcamd.a \ $(CCOLAMD)/Lib/libccolamd.a \ $(SUITESPARSE)/SuiteSparse_config/libsuitesparseconfig.a \ $(CUDART_LIB) \ $(CUBLAS_LIB) HEADER_DIR = $(CHOLMOD)/Include CONFIG_HEADER_DIR = $(SUITESPARSE)/SuiteSparse_config OBJ_DIR = . BIN_DIR = . INCLUDES = -I$(HEADER_DIR) \ -I$(CONFIG_HEADER_DIR) SRCS = $(shell ls *.c) OBJS = $(SRCS:.c=.o) OBJS_BUILD = $(shell ls $(OBJ_DIR)/*.o) APP = prog RM = rm -f all: $(APP) $(APP): $(OBJS) $(CC) $(CFLAGS) -o $(BIN_DIR)/$(APP) $(OBJS_BUILD) $(LIBS) %.o: %.c $(HEADER_DIR)/*.h $(CONFIG_HEADER_DIR)/*.h $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $(OBJ_DIR)/$@ clean: $(RM) $(OBJS_BUILD) $(APP)
解决方案Referring to section 7, p34 of the CHOLMOD UserGuide.pdf that ships with SuiteSparse 4.4.4:
Only the long integer version of CHOLMOD can leverage GPU acceleration.
The long integer version is distinguished by api calls like
cholmod_l_start
instead ofcholmod_start
.With the following modifications to your program:
#include <stdio.h> #include <time.h> #include <unistd.h> #include <assert.h> #include <sys/time.h> #include "cholmod.h" int main (void) { struct timeval t1, t2; double elapsedTime; const char* matFile = "../Matrix/nd6k/nd6k.mtx"; FILE* fp = fopen(matFile, "r"); assert(fp != NULL); cholmod_sparse *A ; cholmod_dense *x, *b; cholmod_factor *L ; cholmod_common* c = (cholmod_common*)malloc(sizeof(cholmod_common)); cholmod_l_start (c) ; /* start CHOLMOD */ c->useGPU = 1; c->supernodal = CHOLMOD_SUPERNODAL; A = cholmod_l_read_sparse (fp, c) ; /* read in a matrix */ cholmod_l_print_sparse (A, "A", c) ; /* print the matrix */ fclose(fp); if (A == NULL || A->stype == 0) /* A must be symmetric */ { cholmod_l_free_sparse (&A, c) ; cholmod_l_finish (c) ; return (0) ; } b = cholmod_l_ones (A->nrow, 1, A->xtype, c) ; /* b = ones(n,1) */ gettimeofday(&t1, NULL); L = cholmod_l_analyze (A, c) ; /* analyze */ cholmod_l_factorize (A, L, c) ; /* factorize */ x = cholmod_l_solve (CHOLMOD_A, L, b, c) ; /* solve Ax=b */ gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time: %.4f ms\n", elapsedTime); cholmod_l_gpu_stats(c); cholmod_l_free_factor (&L, c) ; /* free matrices */ cholmod_l_free_sparse (&A, c) ; cholmod_l_free_dense (&x, c) ; cholmod_l_free_dense (&b, c) ; cholmod_l_finish (c) ; /* finish CHOLMOD */ return (0) ; }
I get output like this:
$ ./prog CHOLMOD sparse: A: 18000-by-18000, nz 3457658, upper. OK Time: 14570.3950 ms CHOLMOD GPU/CPU statistics: SYRK CPU calls 888 time 1.0637e-01 GPU calls 213 time 8.9194e-02 GEMM CPU calls 711 time 1.1511e-01 GPU calls 213 time 1.9351e-03 POTRF CPU calls 217 time 3.2180e-02 GPU calls 5 time 1.5788e-01 TRSM CPU calls 217 time 6.0409e-01 GPU calls 4 time 5.6943e-02 time in the BLAS: CPU 8.5774e-01 GPU 3.0595e-01 total: 1.1637e+00 assembly time 0.0000e+00 0.0000e+00 $
indicating the GPU is being used.
这篇关于在自己的代码中无法使用带 CUDA 加速的 CHOLMOD 的文章就介绍到这里了，希望我们推荐的答案对大家有所帮助，也希望大家多多支持 IT 屋！