MPI - 发送和接收列 [英] MPI - Sending and Receiving a column

查看:115
本文介绍了MPI - 发送和接收列的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

我需要从一个进程发送一个矩阵列从另一个接收它。我试着运行下面的程序,但我发现了一个奇怪的结果(至少我是这么认为的);仅矩阵的第一个元素被复制,并且一些矩阵元素意外更改。

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "mpi.h"

void swap(int* a,int* b){
    int temp;
    temp=*a;
    *a=*b;
    *b=temp;
}
void print_matrix(double** A,int n){
    int i,j;
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            printf("%f ",A[i][j]);
        }
        printf("\n");
    }
}

int main(int argc, char *argv[]){
    int i,j,k,l,n,myid,p,maxp;
    double **A;
    MPI_Datatype col_type;
    MPI_Status status;

    n=3;
    A=malloc(n*sizeof(double*)); /* 分配内存 */
    for(i=0;i<n;i++)
        A[i]=malloc(n*sizeof(double));

    A[0][0]=-1;
    A[0][1]=2;
    A[0][2]=-1;
    A[1][0]=2;
    A[1][1]=-1;
    A[1][2]=0;
    A[2][0]=1;
    A[2][1]=7;
    A[2][2]=-3;

    MPI_Init(&argc,&argv);

    MPI_Type_vector(n, 1, n, MPI_DOUBLE,&col_type);
    MPI_Type_commit(&col_type);
    MPI_Comm_size(MPI_COMM_WORLD,&p);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);

    if(myid==0){
        printf("Starting Method with p=%d\n",p);
        print_matrix(A,n);
    }
    if(myid==0){
            maxp=2;
            A[0][0]=-43;
            A[1][0]=-33;
            A[2][0]=-23;
            printf("BEFORE SENDING\n");
            print_matrix(A,n);
            for(l=0;l<p;l++)
                if(l!=myid){
                    MPI_Send(&A[0][0], 1, col_type,l,0,MPI_COMM_WORLD);
                    MPI_Send(&maxp,1,MPI_INT,l,1,MPI_COMM_WORLD);
                }
            printf("AFTER SENDING\n");
            print_matrix(A,n);
    }
    else{
            //receive(k)
            printf("BEFORE RECIEVING\n");
            print_matrix(A,n);
            MPI_Recv(&A[0][1],1,col_type,0,0,MPI_COMM_WORLD,&status);
            MPI_Recv(&maxp,1,MPI_INT,0,1,MPI_COMM_WORLD,&status);
            printf("Just Recieved\n");
            print_matrix(A,n);
    }

    MPI_Finalize();
}


解决方案

问题出在你的内存分配方式上：

  A=malloc(n*sizeof(double*)); /* 分配内存 */
  for(i=0;i<n;i++)
      A[i]=malloc(n*sizeof(double));

这种写法本身完全没有问题，但它并不保证分配出一块连续存放 n*n 个 double 的内存；它分配的是 n 个各含 n 个 double 的独立数组，而这些数组在内存中可能彼此分散。抛开潜在的缓存问题不谈，这也没什么不好——但当你用下面的方式定义"一列"时就出问题了：

  MPI_Type_vector(n, 1, n, MPI_DOUBLE,&col_type);

也就是说，取 n 个 double，每个元素与前一个相距 n 个 double——这实际上假设了所有数据都排布在一个大的连续内存块里。

最简单的改法就是修改你的内存分配，确保数据全部连续且按顺序排列（这几乎总是科学计算中你想要的布局）：

  A=malloc(n*sizeof(double*));        /* 分配行指针 */
  A[0] = malloc(n*n*sizeof(double));  /* 分配连续的数据块 */
  for(i=1;i<n;i++)
      A[i]=&(A[0][i*n]);

  /* ... */

  free(A[0]);
  free(A);

I need to send a matrix column from one process and receive it from another. I tried running the following program, but I'm getting a weird result (at least I think so); Only the first element of the matrix is copied, and some matrix elements change unexpectedly.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "mpi.h"

/* Exchange the integer values pointed to by a and b. */
void swap(int* a,int* b){
    const int saved = *a;
    *a = *b;
    *b = saved;
}
/* Print the n x n matrix A to stdout, one row per line,
 * each entry formatted as "%f " (trailing space preserved). */
void print_matrix(double** A,int n){
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++)
            printf("%f ", A[row][col]);
        printf("\n");
    }
}

/* Demonstrates sending one matrix column (a strided MPI vector datatype)
 * from rank 0 to every other rank, which receives it into column 1. */
int main(int argc, char *argv[]){
    int i, l, n, myid, p, maxp;
    double **A;
    MPI_Datatype col_type;
    MPI_Status status;

    n = 3;
    /* BUG FIX: the original code malloc'ed each row separately, so the
     * n rows could be scattered anywhere in memory.  MPI_Type_vector(n,1,n)
     * below describes n doubles each n apart in ONE contiguous block, so
     * the send/recv walked past the first row into unrelated memory --
     * hence "only the first element is copied" and corrupted entries.
     * Allocate a single contiguous n*n block and point the rows into it. */
    A = malloc(n * sizeof(double *));            /* row pointers */
    if (A == NULL) {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }
    A[0] = malloc((size_t)n * n * sizeof(double)); /* contiguous data */
    if (A[0] == NULL) {
        fprintf(stderr, "out of memory\n");
        free(A);
        return EXIT_FAILURE;
    }
    for (i = 1; i < n; i++)
        A[i] = &A[0][i * n];

    A[0][0] = -1; A[0][1] = 2;  A[0][2] = -1;
    A[1][0] = 2;  A[1][1] = -1; A[1][2] = 0;
    A[2][0] = 1;  A[2][1] = 7;  A[2][2] = -3;

    MPI_Init(&argc, &argv);

    /* One column of a row-major n x n matrix: n doubles, stride n. */
    MPI_Type_vector(n, 1, n, MPI_DOUBLE, &col_type);
    MPI_Type_commit(&col_type);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    if (myid == 0) {
        printf("Starting Method with p=%d\n", p);
        print_matrix(A, n);
    }
    if (myid == 0) {
        maxp = 2;
        /* Overwrite column 0 with the values the other ranks should see. */
        A[0][0] = -43;
        A[1][0] = -33;
        A[2][0] = -23;
        printf("BEFORE SENDING\n");
        print_matrix(A, n);
        for (l = 0; l < p; l++)
            if (l != myid) {
                /* Send column 0 and the scalar maxp to rank l. */
                MPI_Send(&A[0][0], 1, col_type, l, 0, MPI_COMM_WORLD);
                MPI_Send(&maxp, 1, MPI_INT, l, 1, MPI_COMM_WORLD);
            }
        printf("AFTER SENDING\n");
        print_matrix(A, n);
    } else {
        printf("BEFORE RECIEVING\n"); /* (sic) typo kept: runtime output */
        print_matrix(A, n);
        /* Receive rank 0's column 0 into this rank's column 1. */
        MPI_Recv(&A[0][1], 1, col_type, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(&maxp, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
        printf("Just Recieved\n");
        print_matrix(A, n);
    }

    /* Release the committed datatype and the matrix storage. */
    MPI_Type_free(&col_type);
    MPI_Finalize();

    free(A[0]);
    free(A);
    return 0;
}

解决方案

The problem is with your allocation:

A=malloc(n*sizeof(double*)); /*allocating memory */
for(i=0;i<n;i++)
    A[i]=malloc(n*sizeof(double));

This is perfectly fine, but it doesn't necessarily allocate a contiguous array of n*n doubles; it allocates n arrays of n doubles, and those could be scattered all over memory relative to each other. Which (aside from potential cache issues) is also fine, except that when you define a column in this way:

MPI_Type_vector(n, 1, n, MPI_DOUBLE,&col_type);

Eg, n doubles, each of which are n doubles away from the previous one, you're assuming that all of the data is laid out in one big block.

The easiest thing to change is your allocation, to make sure that it is all contiguous and in order (this is almost always what you want for scientific computing):

A=malloc(n*sizeof(double*));        /*allocating pointers */
A[0] = malloc(n*n*sizeof(double));  /* allocating data */
for(i=1;i<n;i++)
    A[i]=&(A[0][i*n]);

/* ... */

free(A[0]);
free(A);

这篇关于MPI - 发送和接收列的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆