MPI_Type_Create_Hindexed_Block generates wrong extent of derived datatype


Problem description


Using Fortran, I'm trying to build a derived datatype for dynamically allocated structs, but the new type gets the wrong extent. The code is as follows:

PROGRAM MAIN
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER :: I
INTEGER :: MYID,NUMPROCS,IError
INTEGER :: Extent,Size,Disp(2)
INTEGER :: Status(MPI_STATUS_SIZE)
INTEGER :: New_Type, Blocks(3), Types(3), Offsets(3), POS(2)
INTEGER :: POS_(4)
INTEGER :: ElmOffset(3),Send_Type
INTEGER :: M

TYPE Struct    
    INTEGER :: N
    REAL :: A
    REAL :: B(2)
END TYPE Struct
TYPE(Struct),ALLOCATABLE :: Structs(:)

    M=9

    CALL MPI_INIT( IError )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, NUMPROCS, IError )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, MYID,     IError )

    ALLOCATE( Structs(M) )
    DO I=1,M
        Structs(I)%N = I*1000 + MYID
        Structs(I)%A = 250.0_8 + MYID*1.0
        Structs(I)%B(1) = 10.0_8 + MYID*1.0
        Structs(I)%B(2) = 20.0_8 + MYID*1.0
    END DO

    CALL MPI_GET_ADDRESS( Structs(1)%N,    POS_(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A,    POS_(2), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), POS_(3), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(2), POS_(4), IError )
    POS_=POS_ - POS_(1)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID, POS_
    END IF

    Types(1) = MPI_INTEGER
    Types(2) = MPI_DOUBLE_PRECISION
    Types(3) = MPI_DOUBLE_PRECISION

    Offsets(1) = 0
    CALL MPI_GET_ADDRESS( Structs(1)%N, Disp(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A, Disp(2), IError )
    Offsets(2) = Offsets(1) + Blocks(1)*( Disp(2)-Disp(1) )
    Disp(1) = Disp(2)
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), Disp(2), IError )
    Offsets(3) = Offsets(2) + Blocks(2)*( Disp(2)-Disp(1) )

    CALL MPI_TYPE_STRUCT( 3, Blocks, Offsets, Types, New_Type, IError )
    CALL MPI_TYPE_COMMIT( New_Type, IError )

    CALL MPI_TYPE_EXTENT(New_Type, Extent, IError)
    CALL MPI_TYPE_SIZE(New_Type, Size, IError)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'New_Type extents = ', Extent
        WRITE(*,*) 'New_Type size = ', Size
    END IF

    CALL MPI_GET_ADDRESS( Structs(1)%N, ElmOffset(1), IError )
    CALL MPI_GET_ADDRESS( Structs(2)%N, ElmOffset(2), IError )
    CALL MPI_GET_ADDRESS( Structs(3)%N, ElmOffset(3), IError )
    ElmOffset=ElmOffset - ElmOffset(1)

    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID,ElmOffset
    END IF

    CALL MPI_TYPE_CREATE_HINDEXED_BLOCK( 3, 1, ElmOffset, New_Type, Send_Type, IError )
    CALL MPI_TYPE_COMMIT( Send_Type, IError )

    CALL MPI_TYPE_EXTENT( Send_Type, Extent, IError )
    CALL MPI_TYPE_SIZE( Send_Type, Size, IError )

    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'Send_Type extents = ', Extent
        WRITE(*,*) 'Send_Type size = ', Size
    END IF

    CALL MPI_TYPE_FREE(Send_Type,IError)
    CALL MPI_TYPE_FREE(New_Type,IError)
    CALL MPI_FINALIZE(IError)

END PROGRAM MAIN

The output is as follows:

            POS_ : 0  8  16  24
New_Type Extents : 32
   New_Type Size : 28

The results above show no problem.

      ElemOffsets :  0  32  64
Send_Type Extents : -32             <= Problem is here !!! It should be 96
   Send_Type Size :  84

I actually want to send 3 blocks of Structs using the derived data type: Send_Type

IF (MYID.EQ.0) THEN
    DO I=1,(NUMPROCS-1)
        CALL MPI_SEND( Structs(1)%N, 1, Send_Type, I, 0, MPI_COMM_WORLD, IError )
    END DO
ELSE
    CALL MPI_RECV( Structs(1)%N, 1, Send_Type, 0, 0, MPI_COMM_WORLD, Status, IError )
END IF

WRITE( (MYID+10),*) Structs(1)%N, Structs(1)%A
WRITE( (MYID+10),*) Structs(1)%B(1), Structs(1)%B(2)

WRITE( (MYID+100),*) Structs(3)%N, Structs(3)%A
WRITE( (MYID+100),*) Structs(3)%B(1), Structs(3)%B(2)

But it fails with the error: Program Exception - access violation

I don't know what's wrong, but it must be that Send_Type is not properly created.

How can this problem be solved?

Solution

The problem is that on a 64-bit OS, addresses are larger than a 32-bit integer. Hence, the function int MPI_Get_address(const void *location, MPI_Aint *address) outputs an MPI_Aint, which is large enough to hold an address. Indeed, an MPI_Aint can be larger than an MPI_INT.

In Fortran, an MPI_Aint is declared as INTEGER (KIND=MPI_ADDRESS_KIND). See also MPI_Aint in MPI_(I)NEIGHBOR_ALLTOALLW() vs int in MPI_(I)ALLTOALLW(), and section 2.5.6 of the MPI Standard on page 48.

Consequently, the datatype INTEGER (KIND=MPI_ADDRESS_KIND) must be used whenever addresses are involved (here, for POS_, Disp, Offsets, Extent and ElmOffset). Passing default INTEGER variables in their place corrupts the displacements handed to MPI, which is why Send_Type ends up with a nonsensical extent of -32 instead of the expected 96.
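
To make the fix concrete, here is a minimal sketch of the wrong and the correct way to receive an address (illustrative only; Structs and IError are the variables from the code above, and BadAddr is a hypothetical name):

    INTEGER                         :: BadAddr  ! WRONG: a default INTEGER may be too small for an address
    INTEGER (KIND=MPI_ADDRESS_KIND) :: Addr     ! correct: large enough by definition

    ! With INCLUDE 'mpif.h' there is no interface checking, so the wrong call
    ! compiles silently but lets MPI write an 8-byte address into 4-byte storage:
    ! CALL MPI_GET_ADDRESS( Structs(1)%N, BadAddr, IError )   ! memory corruption
    CALL MPI_GET_ADDRESS( Structs(1)%N, Addr, IError )        ! correct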

A corrected sample code based on yours, compiled with mpif90 main.f90 -o main -Wall and run with mpirun -np 2 main, follows:

PROGRAM MAIN
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER :: I
INTEGER :: MYID,NUMPROCS,IError
INTEGER :: Size
INTEGER :: Status(MPI_STATUS_SIZE)
INTEGER :: New_Type, Blocks(3), Types(3)
INTEGER :: Send_Type
INTEGER :: M
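! Addresses, displacements, lower bounds, and extents must use KIND=MPI_ADDRESS_KIND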
INTEGER (KIND=MPI_ADDRESS_KIND) :: Offsets(3), POS_(4), ElmOffset(3), Disp(2), Lb, Extent

TYPE Struct    
    INTEGER :: N
    REAL*8 :: A
    REAL*8 :: B(2)
END TYPE Struct
TYPE(Struct),ALLOCATABLE :: Structs(:)
    WRITE(*,*) 'Size of Integer = ',SIZEOF(M)
    WRITE(*,*) 'Size of Integer (KIND=MPI_ADDRESS_KIND)= ',SIZEOF(Extent)
    M=9

    CALL MPI_INIT( IError )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, NUMPROCS, IError )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, MYID,     IError )

    ALLOCATE( Structs(M) )
    DO I=1,M
        Structs(I)%N = I*1000 + MYID
        Structs(I)%A = 250.0_8 + MYID*1.0
        Structs(I)%B(1) = 10.0_8 + MYID*1.0
        Structs(I)%B(2) = 20.0_8 + MYID*1.0
    END DO

    Blocks(1)=1
    Blocks(2)=1
    Blocks(3)=2

    CALL MPI_GET_ADDRESS( Structs(1)%N,    POS_(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A,    POS_(2), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), POS_(3), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%B(2), POS_(4), IError )
    POS_=POS_ - POS_(1)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID, POS_
    END IF

    Types(1) = MPI_INTEGER
    Types(2) = MPI_DOUBLE_PRECISION
    Types(3) = MPI_DOUBLE_PRECISION

    Offsets(1) = 0
    CALL MPI_GET_ADDRESS( Structs(1)%N, Disp(1), IError )
    CALL MPI_GET_ADDRESS( Structs(1)%A, Disp(2), IError )
    !Offsets(2) = Offsets(1) + Blocks(1)*( Disp(2)-Disp(1) )
    Offsets(2) = Offsets(1) + ( Disp(2)-Disp(1) )
    Disp(1) = Disp(2)
    CALL MPI_GET_ADDRESS( Structs(1)%B(1), Disp(2), IError )
    !Offsets(3) = Offsets(2) + Blocks(2)*( Disp(2)-Disp(1) )
    Offsets(3) = Offsets(2) + ( Disp(2)-Disp(1) )

    CALL MPI_TYPE_CREATE_STRUCT( 3, Blocks, Offsets, Types, New_Type, IError )
    CALL MPI_TYPE_COMMIT( New_Type, IError )

    CALL MPI_TYPE_GET_EXTENT(New_Type, Lb, Extent, IError)
    CALL MPI_TYPE_SIZE(New_Type, Size, IError)
    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'New_Type extents = ', Extent
        WRITE(*,*) 'New_Type size = ', Size
    END IF

    CALL MPI_GET_ADDRESS( Structs(1)%N, ElmOffset(1), IError )
    CALL MPI_GET_ADDRESS( Structs(2)%N, ElmOffset(2), IError )
    CALL MPI_GET_ADDRESS( Structs(3)%N, ElmOffset(3), IError )
    ElmOffset=ElmOffset - ElmOffset(1)

    IF (MYID.EQ.0) THEN
        WRITE(*,*) MYID,ElmOffset
    END IF

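    ! ElmOffset now has KIND=MPI_ADDRESS_KIND, so the byte displacements reach MPI intact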
    CALL MPI_TYPE_CREATE_HINDEXED_BLOCK( 3, 1, ElmOffset, New_Type, Send_Type, IError )
    CALL MPI_TYPE_COMMIT( Send_Type, IError )

    CALL MPI_TYPE_GET_EXTENT( Send_Type, Lb, Extent, IError )
    CALL MPI_TYPE_SIZE( Send_Type, Size, IError )

    IF (MYID.EQ.0) THEN
        WRITE(*,*) 'Send_Type extents = ', Extent
        WRITE(*,*) 'Send_Type size = ', Size
    END IF


    IF (MYID.EQ.0) THEN
        DO I=1,(NUMPROCS-1)
            CALL MPI_SEND( Structs(1)%N, 1, Send_Type, I, 0, MPI_COMM_WORLD, IError)
        END DO       
    ELSE
        CALL MPI_RECV( Structs(1)%N, 1, Send_Type, 0, 0, MPI_COMM_WORLD, Status, IError)

    END IF

    WRITE( (MYID+10),*) Structs(1)%N, Structs(1)%A
    WRITE( (MYID+10),*) Structs(1)%B(1), Structs(1)%B(2)

    WRITE( (MYID+100),*) Structs(3)%N, Structs(3)%A
    WRITE( (MYID+100),*) Structs(3)%B(1), Structs(3)%B(2)

    CALL MPI_TYPE_FREE(Send_Type,IError)
    CALL MPI_TYPE_FREE(New_Type,IError)
    CALL MPI_FINALIZE(IError)

END PROGRAM MAIN

I changed REAL :: A to REAL*8 :: A to remove a warning about double-to-float conversion on the line Structs(I)%A = 250.0_8 + MYID*1.0. As noticed by Hristo Iliev, this is consistent with the new datatype, which uses MPI_DOUBLE_PRECISION.
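
As a further safeguard (my suggestion, not part of the original answer): replacing INCLUDE 'mpif.h' with the mpi module gives the compiler explicit interfaces for routines like MPI_GET_ADDRESS, so with most implementations a default INTEGER passed where an INTEGER (KIND=MPI_ADDRESS_KIND) is expected is rejected at compile time rather than corrupting memory at run time. A minimal sketch:

PROGRAM KIND_CHECK
    USE mpi                  ! explicit interfaces: argument kinds are checked
    IMPLICIT NONE
    INTEGER :: IError, N
    INTEGER (KIND=MPI_ADDRESS_KIND) :: Addr
    N = 42
    CALL MPI_INIT( IError )
    ! Passing a default INTEGER instead of Addr here would typically be
    ! rejected by the compiler (with mpif.h it would compile silently).
    CALL MPI_GET_ADDRESS( N, Addr, IError )
    WRITE(*,*) 'Address of N = ', Addr
    CALL MPI_FINALIZE( IError )
END PROGRAM KIND_CHECK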
