Segmentation fault when sending an MPI derived type containing an allocatable array in Fortran

Asked: 2018-06-05 20:03:00

Tags: segmentation-fault fortran mpi derived-types

I'm having trouble with a Fortran program in which the master task sends an array of structures, each containing an allocatable array, to the slaves. A slave receives the array and prints it successfully, and then the program crashes. The GDB debugger shows the following message:

Program received signal SIGSEGV, Segmentation fault. __GI___libc_free (mem=0x2) at malloc.c:2931

Clearly I am missing something. Here is my code:

program test_type  

use mpi

implicit none

type mytype
real,allocatable::x(:)
integer::a
end type mytype

type(mytype),allocatable::y(:)
integer::n,i,ierr,myid,ntasks,status
integer :: datatype0, ntasktype, oldtypes(2), blockcounts(2) 
integer, allocatable :: oldtypes2(:), blockcounts2(:), datatype(:)
integer(KIND=MPI_ADDRESS_KIND) :: offsets(2)
integer(KIND=MPI_ADDRESS_KIND), allocatable :: offsets2(:)
integer(kind=MPI_ADDRESS_KIND) :: extent

call mpi_init(ierr)
call mpi_comm_rank(mpi_comm_world,myid,ierr)
call mpi_comm_size(mpi_comm_world,ntasks,ierr)

n=2
allocate(y(ntasks))
allocate(oldtypes2(ntasks), blockcounts2(ntasks))
allocate(offsets2(ntasks), datatype(ntasks))
do i=1,ntasks
 allocate(y(i)%x(n))
 y(i)%x=0.
 y(i)%a=80
enddo

if(myid==0)then
  do i=1,ntasks
    call random_number(y(i)%x)
    y(i)%a=myid
    write(0,*) "y(",i,") in process", myid, y(i)%x, y(i)%a
  enddo
endif

! (1) Create a separate structure datatype for each record
do i=1,ntasks
  call mpi_get_address(y(i)%x,offsets(1),ierr)
  call mpi_get_address(y(i)%a,offsets(2),ierr)
  offsets=offsets-offsets(1)

  oldtypes=(/ mpi_real,mpi_integer /)
  blockcounts=(/ n,1 /)

  call mpi_type_create_struct(2,blockcounts,offsets,oldtypes,datatype(i),ierr)
end do

! (2) Create a structure of structures that describes the whole array
do i=1,ntasks
  call MPI_GET_ADDRESS(y(i)%x, offsets2(i), ierr)
enddo
offsets2 = offsets2 - offsets2(1)
do i=1,ntasks
  oldtypes2(i)=datatype(i)
  blockcounts2(i)=1
enddo
call mpi_type_create_struct(ntasks,blockcounts2,offsets2,oldtypes2,ntasktype,ierr)
call mpi_type_commit(ntasktype, ierr)

! (2.1) Free the intermediate datatypes
do i=1,ntasks
  call MPI_TYPE_FREE(datatype(i), ierr)
enddo

! (3) Send the array
if(myid==0) then
  do i=1,ntasks-1
    call MPI_SEND(y(1)%x, 1, ntasktype, &
                  i, 2, MPI_COMM_WORLD, ierr)
  enddo
  do i=1,ntasks-1
    write(0,*) "sent", y(i)%x,y(i)%a
  enddo
else
  call MPI_RECV(y(1)%x, 1, ntasktype, 0, 2, MPI_COMM_WORLD, status, ierr)
  do i=1,ntasks
    write(0,*) "task(",myid,") received", i,y(i)%x,y(i)%a
  enddo
end if

deallocate(y)
deallocate(oldtypes2, blockcounts2)
deallocate(offsets2,datatype)
call mpi_finalize(ierr)

end program

1 Answer:

Answer 0 (score: 1)

From man MPI_Recv:

Fortran Syntax
       USE MPI
       ! or the older form: INCLUDE 'mpif.h'
       MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)
            <type>    BUF(*)
            INTEGER   COUNT, DATATYPE, SOURCE, TAG, COMM
            INTEGER   STATUS(MPI_STATUS_SIZE), IERROR

Your problem is memory corruption on the non-zero ranks, because you declared

integer :: status

instead of

integer :: status(MPI_STATUS_SIZE)
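
If you never inspect the received status, you can also sidestep this class of bug entirely by passing the predefined constant MPI_STATUS_IGNORE in place of a status argument (a standard MPI alternative, not required for the fix above):

call MPI_RECV(y(1)%x, 1, ntasktype, 0, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)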

As a side note, you can simplify your code by building a single derived datatype with 2*ntasks elements directly, and then using MPI_BOTTOM as the send and receive buffer.

If you really want to manipulate offsets, you should use MPI_Aint_diff() instead of the - operator.
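
A minimal sketch of that per-record offset computation (MPI_Aint_diff() requires an MPI-3 library; base is a helper variable introduced here, of kind MPI_ADDRESS_KIND):

integer(kind=MPI_ADDRESS_KIND) :: base

call mpi_get_address(y(i)%x, base, ierr)        ! base address of record i
call mpi_get_address(y(i)%a, offsets(2), ierr)
offsets(1) = 0                                  ! x sits at the base address itself
offsets(2) = MPI_Aint_diff(offsets(2), base)    ! portable signed address difference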

[EDIT] Here is a revised and simplified version that uses MPI_BOTTOM:

program test_type  

use mpi

implicit none

type mytype
real,allocatable::x(:)
integer::a
end type mytype

type(mytype),allocatable::y(:)
integer::n,i,ierr,myid,ntasks,status(MPI_STATUS_SIZE)
integer :: ntasktype
integer, allocatable :: oldtypes(:), blockcounts(:)
integer(KIND=MPI_ADDRESS_KIND), allocatable :: offsets(:)

call mpi_init(ierr)
call mpi_comm_rank(mpi_comm_world,myid,ierr)
call mpi_comm_size(mpi_comm_world,ntasks,ierr)

n=2
allocate(y(ntasks))
allocate(oldtypes(2*ntasks), blockcounts(2*ntasks))
allocate(offsets(2*ntasks))
do i=1,ntasks
 allocate(y(i)%x(n))
 y(i)%x=0.
 y(i)%a=80
enddo

if(myid==0)then
  do i=1,ntasks
    call random_number(y(i)%x)
    y(i)%a=myid
    write(0,*) "y(",i,") in process", myid, y(i)%x, y(i)%a
   enddo
endif

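! Build a single flat datatype covering every record: slots 2*i-1 and 2*i
! describe y(i)%x (n reals) and y(i)%a (one integer) via absolute addresses.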
do i=1,ntasks
  call mpi_get_address(y(i)%x,offsets(2*i-1),ierr)
  call mpi_get_address(y(i)%a,offsets(2*i  ),ierr)

  oldtypes(2*i-1) = mpi_real
  oldtypes(2*i  ) = mpi_integer

  blockcounts(2*i-1) = n
  blockcounts(2*i  ) = 1
end do

call mpi_type_create_struct(2*ntasks,blockcounts,offsets,oldtypes,ntasktype,ierr) 
call mpi_type_commit(ntasktype, ierr)

! (3) Send the array
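! Because the datatype stores absolute addresses, the buffer argument is MPI_BOTTOM.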
if(myid==0) then   
  do i=1,ntasks-1 
    call MPI_SEND(MPI_BOTTOM, 1, ntasktype, &
                  i, 2, MPI_COMM_WORLD, ierr)
  enddo
  do i=1,ntasks-1 
   write(0,*) "sent", y(i)%x,y(i)%a
  enddo
else
  call MPI_RECV(MPI_BOTTOM,1, ntasktype, 0, 2, MPI_COMM_WORLD, status, ierr)
  do i=1,ntasks
    write(0,*) "task(",myid,") received", i,y(i)%x,y(i)%a
  enddo
end if

do i=1, ntasks
  deallocate(y(i)%x)
enddo
deallocate(y)
deallocate(oldtypes, blockcounts)
deallocate(offsets)

call mpi_finalize(ierr)

end program
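
A possible way to build and run the revised program, assuming your MPI installation provides the usual mpif90 wrapper and mpirun launcher:

mpif90 test_type.f90 -o test_type
mpirun -np 4 ./test_type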