如何MPI_Gatherv来自处理器,其中,每一个过程可以发送不同的列数列 [英] How to MPI_Gatherv columns from processor, where each process may send different number of columns

查看:114
本文介绍了如何MPI_Gatherv来自处理器,其中,每一个过程可以发送不同的列数列的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

2说出过程参与。流程0(0级)的

  A = {A D
      bé
      缩略词
    }

和流程1(1级)的

  A = {绿
      H
      一世
    }

我想两个处理器发送这些列序号为0,这样等级0将有发言权另一个二维数组以下。

  B = {A D G
      Bé^ h
      缩略词我
    }

我创建的MPI_Gatherv新列的数据类型,并正尝试以下code,这让我没有在那里。

我的具体问题是:


  1. 我应该如何处理这个

  2. 应该是什么send_type和recv_type。

  3. 如何应指定位移(如果他们在新的数据类型或MPI_CHAR期限)

感谢。

这是我的code:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
  int numprocs, my_rank;
   long int i, j;
   MPI_Status status;
   char **A;
   char **B;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  if(my_rank == 0)
  {
    A = (char **)calloc((3), sizeof(char *));
    B = (char **)calloc((3), sizeof(char *));
    for(i=0; i<3; ++i)
    {
       A[i] = (char *)calloc(2, sizeof(char));
       B[i] = (char *)calloc(3, sizeof(char));
    }
    A[0][0] = 'a';
    A[1][0] = 'b';
    A[2][0] = 'c';
    A[0][1] = 'd';
    A[1][1] = 'e';
    A[2][1] = 'f';
  }
  else
  {
    A = (char **)calloc((3), sizeof(char *));
    for(i=0; i<3; ++i)
    {
       A[i] = (char *)calloc(1, sizeof(char));
    }
    A[0][0] = 'g';
    A[1][0] = 'h';
    A[2][0] = 'i';
  }
  MPI_Datatype b_col_type;
  MPI_Type_vector(3, 1, 1, MPI_CHAR, &b_col_type);
  MPI_Type_commit(&b_col_type);
  int displs[2] = {0, 2};
  int recvcounts[2] = {2, 1};
  MPI_Gatherv(&A[0][0], recvcounts[my_rank], b_col_type, &B[0][0], recvcounts, displs, b_col_type, 0, MPI_COMM_WORLD);
  if(my_rank == 0)
  {
    for(i=0; i<3; ++i)
    {
      for(j=0; j<3; ++j)
        printf("%c ", B[i][j]);
      printf("\n");
    }
  }
  MPI_Finalize();
  return 0;
}


解决方案

所以第一关 - 这又发表了MPI和C数组所有的时间 - 你不能真正做到标准C二维数组的事情。让我们来看看这个:

A = (char **)calloc((3), sizeof(char *));
for(i=0; i<3; ++i)
{
   A[i] = (char *)calloc(2, sizeof(char));
}

这一定会分配字符的3x2的阵列,但是你不知道所得到的数据是如何在内存布局。特别是,有一个在的所有的是 A [1] [0] 紧跟 A [0] [1难保] 。这使得它很难创建其中跨越所述数据结构的MPI数据类型!您需要分配3x2的连续字节,然后进行阵列点进去:

char **charalloc2d(int n, int m) {
    char *data = (char *)calloc(n*m, sizeof(char));
    char **array = (char **)calloc(n, sizeof(char *));
    for (int i=0; i<n; i++)
        array[i] = &(data[i*m]);

    return array;
}

void charfree2d(char **array) {
    free(array[0]);
    free(array);
    return;
}

/* ... */
nrows = 3;
ncols = 2;
A = charalloc2d(nrows, ncols);

现在我们了解一下数组的布局,可以依靠,要建立数据类型。

您是在正确的轨道与数据类型上 -

MPI_Datatype b_col_type;
MPI_Type_vector(3, 1, 1, MPI_CHAR, &b_col_type);
MPI_Type_commit(&b_col_type);

MPI_Type_vector的签名(计数,blocklen,步幅,OLD_TYPE,* NEWTYPE)。结果
我们要NROWS字符,进来的1块;但他们相距NCOLS;所以这是步幅。

请注意,这是真正的 A 数组的列类型,而不是 B ;的类型将取决于该阵列中的列数。因此,每个过程使用不同sendtype,这是好的。

MPI_Datatype a_col_type;
MPI_Type_vector(nrows, 1, ncols, MPI_CHAR, &a_col_type);
MPI_Type_commit(&a_col_type);

最后一步是 MPI_Gatherv ,在这里你必须是一个小可爱。诀窍是,我们要在同一时间发送(和接收)的这些事情多 - 也就是说,几个连续的。但是,我们需要在下一列不被NROWS * NCOLS字符了,但只是一个字符之遥。幸运的是,我们可以做到这一点通过设置上限的数据结构是一个字符从下界了,所以下一个元素没有在正确的地方开始。这是通过标准以及在事实上,一个他们的4.1.4节中的例子有铰链就可以了。

要做到这一点,我们创建了一个调整类型的结束只是一个字节开始后:

MPI_Type_create_resized(a_col_type, 0, 1*sizeof(char), &new_a_col_type);
MPI_Type_commit(&new_a_col_type);

和同样为 B ;现在我们就可以发送和接收的这些倍数为人们所期望的。所以对我来说了以下工作:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

char **charalloc2d(int n, int m) {
    char *data = (char *)calloc(n*m, sizeof(char));
    char **array = (char **)calloc(n, sizeof(char *));
    for (int i=0; i<n; i++)
        array[i] = &(data[i*m]);

    return array;
}

void charfree2d(char **array) {
    free(array[0]);
    free(array);
    return;
}

int main(int argc, char *argv[])
{
    int numprocs, my_rank;
    int nrows, ncols, totncols;
    long int i, j;
    char **A;
    char **B;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    if(my_rank == 0)
    {
        nrows = 3;
        ncols = 2;
        totncols = 3;

        A = charalloc2d(nrows, ncols);
        B = charalloc2d(nrows, totncols);

        A[0][0] = 'a';
        A[1][0] = 'b';
        A[2][0] = 'c';
        A[0][1] = 'd';
        A[1][1] = 'e';
        A[2][1] = 'f';
    }
    else
    {
        nrows = 3;
        ncols = 1;
        A = charalloc2d(nrows, ncols);
        B = charalloc2d(1,1); /* just so gatherv survives */
        A[0][0] = 'g';
        A[1][0] = 'h';
        A[2][0] = 'i';
    }
    MPI_Datatype a_col_type, new_a_col_type;
    MPI_Type_vector(nrows, 1, ncols, MPI_CHAR, &a_col_type);
    MPI_Type_commit(&a_col_type);

    /* make the type have extent 1 character -- now the next
     * column starts in the next character of the array
     */
    MPI_Type_create_resized(a_col_type, 0, 1*sizeof(char), &new_a_col_type);
    MPI_Type_commit(&new_a_col_type);

    MPI_Datatype b_col_type, new_b_col_type;
    if (my_rank == 0) {
        MPI_Type_vector(nrows, 1, totncols, MPI_CHAR, &b_col_type);
        MPI_Type_commit(&b_col_type);

        /* similarly "resize" b columns */
        MPI_Type_create_resized(b_col_type, 0, 1*sizeof(char), &new_b_col_type);
        MPI_Type_commit(&new_b_col_type);
    }

    int displs[2] = {0, 2};
    int recvcounts[2] = {2, 1};
    MPI_Gatherv(A[0], recvcounts[my_rank], new_a_col_type,
                B[0], recvcounts, displs, new_b_col_type,
                0, MPI_COMM_WORLD);
    if(my_rank == 0)
    {
        for(i=0; i<3; ++i)
        {
            for(j=0; j<3; ++j)
                printf("%c ", B[i][j]);
            printf("\n");
        }
    }
    MPI_Finalize();
    return 0;
}

Say 2 processes are participating. Process 0 (rank 0) has

A = { a d
      b e
      c f
    }

and process 1 (rank 1) has

A = { g
      h
      i
    }

I want both processors to send these columns to rank 0 so that rank 0 will have the following in say another 2D-array.

B = { a d g
      b e h
      c f i
    }

I create a new column data type for the MPI_Gatherv and am trying the following code, which is getting me no where.

My specific questions are:

  1. How should I approach this
  2. What should be the send_type and recv_type.
  3. How should displacements be specified ( should they be in term of the new data type or MPI_CHAR)

Thanks.

This is my code:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
  int numprocs, my_rank;
   long int i, j;
   MPI_Status status;
   char **A;
   char **B;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  if(my_rank == 0)
  {
    A = (char **)calloc((3), sizeof(char *));
    B = (char **)calloc((3), sizeof(char *));
    for(i=0; i<3; ++i)
    {
       A[i] = (char *)calloc(2, sizeof(char));
       B[i] = (char *)calloc(3, sizeof(char));
    }

    A[0][0] = 'a';
    A[1][0] = 'b';
    A[2][0] = 'c';
    A[0][1] = 'd';
    A[1][1] = 'e';
    A[2][1] = 'f';
  }
  else
  {
    A = (char **)calloc((3), sizeof(char *));
    for(i=0; i<3; ++i)
    {
       A[i] = (char *)calloc(1, sizeof(char));
    }
    A[0][0] = 'g';
    A[1][0] = 'h';
    A[2][0] = 'i';

  }
  MPI_Datatype b_col_type;
  MPI_Type_vector(3, 1, 1, MPI_CHAR, &b_col_type);
  MPI_Type_commit(&b_col_type);
  int displs[2] = {0, 2};
  int recvcounts[2] = {2, 1};
  MPI_Gatherv(&A[0][0], recvcounts[my_rank], b_col_type, &B[0][0], recvcounts, displs,    b_col_type, 0, MPI_COMM_WORLD);
  if(my_rank == 0)
  {
    for(i=0; i<3; ++i)
    {
      for(j=0; j<3; ++j)
        printf("%c ", B[i][j]);
      printf("\n");
    }
  }
  MPI_Finalize();
  return 0;
}

解决方案

So first off - and this comes up with MPI and C arrays all the time - you can't really do the standard C two dimensional array thing. Let's look at this:

A = (char **)calloc((3), sizeof(char *));
for(i=0; i<3; ++i)
{
   A[i] = (char *)calloc(2, sizeof(char));
}

This will definitely allocate a 3x2 array of characters, but you have no idea how the resulting data is laid out in memory. In particular, there's no guarantee at all that A[1][0] immediately follows A[0][1]. That makes it very difficult to create MPI datatypes which span the data structure! You need to allocate 3x2 contiguous bytes, and then make the array point into it:

char **charalloc2d(int n, int m) {
    char *data = (char *)calloc(n*m,sizeof(char));
    char **array = (char **)calloc(n, sizeof(char *));
    for (int i=0; i<n; i++)
        array[i] = &(data[i*m]);

    return array;
}

void charfree2d(char **array) {
    free(array[0]);
    free(array);
    return;
}

/* ... */
nrows = 3;
ncols = 2;
A = charalloc2d(nrows,ncols);

Now we know something about the layout of the array, and can depend on that to build datatypes.

You're on the right track with the data types --

MPI_Datatype b_col_type;
MPI_Type_vector(3, 1, 1, MPI_CHAR, &b_col_type);
MPI_Type_commit(&b_col_type);

the signature of MPI_Type_vector is (count, blocklen, stride, old_type, *newtype).
We want nrows characters, that come in blocks of 1; but they're spaced ncols apart; so that's the stride.

Note that this is really the column type of the A array, rather than B; the type will depend on the number of columns in the array. So each process is using a different sendtype, which is fine.

MPI_Datatype a_col_type;
MPI_Type_vector(nrows, 1, ncols, MPI_CHAR, &a_col_type);
MPI_Type_commit(&a_col_type);

The final step is the MPI_Gatherv, and here you have to be a little cute. The trick is, we want to send (and receive) multiples of these things at a time - that is, several consecutive ones. But we need the next column to be not nrows*ncols chars away, but just one char away. Luckily, we can do that by setting the upper bound of the data structure to be just one character away from the lower bound, so that the next element does start in the right place. This is allowed by the standard, and in fact one of the examples in Section 4.1.4 of the standard hinges on it.

To do that, we create a resized type that ends just one byte after it starts:

MPI_Type_create_resized(a_col_type, 0, 1*sizeof(char), &new_a_col_type);
MPI_Type_commit(&new_a_col_type); 

and similarly for B; and now we can send and receive multiples of these as one would expect. So the following works for me:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

char **charalloc2d(int n, int m) {
    char *data = (char *)calloc(n*m,sizeof(char));
    char **array = (char **)calloc(n, sizeof(char *));
    for (int i=0; i<n; i++)
        array[i] = &(data[i*m]);

    return array;
}

void charfree2d(char **array) {
    free(array[0]);
    free(array);
    return;
}


int main(int argc, char *argv[])
{
    int numprocs, my_rank;
    int nrows, ncols, totncols;
    long int i, j;
    char **A;
    char **B;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    if(my_rank == 0)
    {
        nrows=3;
        ncols=2;
        totncols = 3;

        A = charalloc2d(nrows, ncols);
        B = charalloc2d(nrows, totncols);

        A[0][0] = 'a';
        A[1][0] = 'b';
        A[2][0] = 'c';
        A[0][1] = 'd';
        A[1][1] = 'e';
        A[2][1] = 'f';
    }
    else
    {
        nrows = 3;
        ncols = 1;
        A = charalloc2d(nrows, ncols);
        B = charalloc2d(1,1); /* just so gatherv survives */
        A[0][0] = 'g';
        A[1][0] = 'h';
        A[2][0] = 'i';

    }
    MPI_Datatype a_col_type, new_a_col_type;
    MPI_Type_vector(nrows, 1, ncols, MPI_CHAR, &a_col_type);
    MPI_Type_commit(&a_col_type);

    /* make the type have extent 1 character -- now the next
     * column starts in the next character of the array 
     */
    MPI_Type_create_resized(a_col_type, 0, 1*sizeof(char), &new_a_col_type);
    MPI_Type_commit(&new_a_col_type);

    MPI_Datatype b_col_type, new_b_col_type;
    if (my_rank == 0) {
        MPI_Type_vector(nrows, 1, totncols, MPI_CHAR, &b_col_type);
        MPI_Type_commit(&b_col_type);

        /* similarly "resize" b columns */
        MPI_Type_create_resized(b_col_type, 0, 1*sizeof(char), &new_b_col_type);
        MPI_Type_commit(&new_b_col_type);
    }

    int displs[2] = {0, 2};
    int recvcounts[2] = {2, 1};
    MPI_Gatherv(A[0], recvcounts[my_rank], new_a_col_type,
                B[0], recvcounts, displs, new_b_col_type,
                0, MPI_COMM_WORLD);
    if(my_rank == 0)
    {
        for(i=0; i<3; ++i)
        {
            for(j=0; j<3; ++j)
                printf("%c ", B[i][j]);
            printf("\n");
        }
    }
    MPI_Finalize();
    return 0;
}

这篇关于如何MPI_Gatherv来自处理器,其中,每一个过程可以发送不同的列数列的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆