使用Bcast,Scatter和Gather with Dynamic Allocation时的MPI分段错误

时间:2016-06-02 17:37:59

标签: c++ matrix malloc mpi dynamic-memory-allocation

我正在处理一段代码,在通过控制台(console)参数指定矩阵乘法的矩阵大小时遇到了问题。

第一个版本可以正常工作:

 const int size = 1000;
 int mat_a[size][size], mat_b[size][size], mat_c[size][size];

要使用控制台参数,我发现需要实现动态数组分配。 不幸的是我遇到了一个问题:

 *** Process received signal ***
 Signal: Segmentation fault (11)
 Signal code: Address not mapped (1)
 Failing at address: 0x7ffd955237f8

我认为这个问题可能出在 Bcast、Scatter 和 Gather 这样的 MPI 函数中。我在 Stack Overflow 上搜索过类似的案例,但没有找到答案。

以下是代码:

#include <mpi.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <math.h>

int main(int argc, char *argv[])
{
  int taskid, ntasks, mat_start, mat_end, i, j, k;
double start_time; //hold start time
double end_time; // hold end time  

  MPI_Init (&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &taskid);   
  MPI_Comm_size(MPI_COMM_WORLD, &ntasks); 

        int size = 0;

        if (argc != 2) {
                printf("No arguments");
                exit(-1);
        }

        size = atoi(argv[1]);

        if (size < 0 ) {
                printf("SIZE: %2d \n", size);
                exit(-1);
        }

int **mat_a = (int **)malloc(sizeof(int *)*size);
int **mat_b = (int **)malloc(sizeof(int *)*size);  
int **mat_c = (int **)malloc(sizeof(int *)*size);

for (int z = 0 ; z < size ; z++){
     mat_a[z] = (int *)malloc(sizeof(int)*size);
     mat_b[z] = (int *)malloc(sizeof(int)*size);
     mat_c[z] = (int *)malloc(sizeof(int)*size);
}

  mat_start = taskid * size/ntasks;
  mat_end = (taskid+1) * size/ntasks;

  if (taskid==0) {
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            mat_a[i][j] = (int)(sin(i) * i * j) % 10;
        }
    }
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            mat_b[i][j] = (int)(cos(j) * (i + j)) % 10;
        }
    }
  }

start_time = MPI_Wtime();

  MPI_Bcast (&mat_b, size*size, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Scatter (&mat_a, size*size/ntasks, MPI_INT, mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

  printf("computing slice %d (from row %d to %d)\n", taskid, mat_start, mat_end-1);
  for (i=mat_start; i<mat_end; i++) 
    for (j=0; j<size; j++) {
      mat_c[i][j]=0;
      for (k=0; k<size; k++)
    mat_c[i][j] += mat_a[i][k]*mat_b[k][j];
    }

  MPI_Gather (mat_c[mat_start], size*size/ntasks, MPI_INT, mat_c, size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

        end_time = MPI_Wtime();
        printf("\nRunning Time = %f\n\n", end_time - start_time);

  MPI_Finalize();
  return 0;
}

谁能告诉我出了什么问题?

编辑:

谢谢你的回答。我试图实施您的解决方案,但没有取得好成绩。我改变了部分代码看起来像这样:

int **mat_a=(int **)malloc(size*sizeof(int *));
        int **mat_b=(int **)malloc(size*sizeof(int *));
        int **mat_c=(int **)malloc(size*sizeof(int *));

    if(mat_a==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
    if(mat_b==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
    if(mat_c==NULL){fprintf(stderr,"malloc failed\n");exit(1);}

    mat_a[0]=(int*)malloc(size*size*sizeof(int));
 mat_b[0]=(int*)malloc(size*size*sizeof(int));
mat_c[0]=(int*)malloc(size*size*sizeof(int));

   if(mat_a[0]==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
if(mat_b[0]==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
if(mat_c[0]==NULL){fprintf(stderr,"malloc failed\n");exit(1);}

  int ti;
    for(ti=1;ti<size;ti++){
        mat_a[ti]=&mat_a[0][size*ti];
mat_b[ti]=&mat_a[0][size*ti];
mat_c[ti]=&mat_a[0][size*ti];

 }

  mat_start = taskid * size/ntasks;
  mat_end = (taskid+1) * size/ntasks;

//populating the array ......

start_time = MPI_Wtime();

  MPI_Bcast(mat_a[0],size*size, MPI_INT,0,MPI_COMM_WORLD); 
 //MPI_Bcast (&mat_b, size*size, MPI_INT, 0, MPI_COMM_WORLD);
//  MPI_Scatter (&mat_b, size*size/ntasks, MPI_INT, mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Scatter (mat_b[0], size*size/ntasks, MPI_INT, mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

  printf("computing slice %d (from row %d to %d)\n", taskid, mat_start, mat_end-1);
  for (i=mat_start; i<mat_end; i++) 
    for (j=0; j<size; j++) {
      mat_c[i][j]=0;
      for (k=0; k<size; k++)
    mat_c[i][j] += mat_a[i][k]*mat_b[k][j];
    }

  MPI_Gather (mat_c[mat_start], size*size/ntasks, MPI_INT, mat_c, size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

        end_time = MPI_Wtime();
        printf("\nRunning Time = %f\n\n", end_time - start_time);

  MPI_Finalize();
  return 0;
}

并且程序开始运行甚至打印mat_a(添加打印时),但经过一段时间的延迟我得到了这个:

[cuda:05167] *** Process received signal ***
[cuda:05167] Signal: Segmentation fault (11)
[cuda:05167] Signal code:  (128)
[cuda:05167] Failing at address: (nil)

问题可能出在 Scatter 和 Gather 上?该如何修改,才能让它最终正常工作?

2 个答案:

答案 0 :(得分:2)

问题是你在C中声明了一个二维数组,但这不是MPI所期待的!

MPI无法知道你已经为它提供了一个2d数组! MPI需要一个连续的数组(在你的情况下是 ints )。

要解决您的问题,您需要分配"伪多维数组"(一整块连续内存加上一组行指针)!这将保证您的内存是连续的。在此之后,您将不会再出现分段错误。

答案 1 :(得分:2)

分配内存的方式的问题是2D数组在内存中不连续:malloc()每行调用一次。见sending blocks of 2D array in C using MPI

要更改它,请使用以下过程:

int n=42;
int** mat_a=malloc(n*sizeof(int*));  /* table of n row pointers */
if(mat_a==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
mat_a[0]=malloc(n*n*sizeof(int));  /* ONE contiguous block holding all n*n ints */
if(mat_a[0]==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
int i;
for(i=1;i<n;i++){
   mat_a[i]=&mat_a[0][n*i];  /* row i points into the single block: mat_a[0] is a valid MPI buffer */
}
...
free(mat_a[0]);  /* free the data block first, then the pointer table */
free(mat_a);

其次,指针的值仅对给定的进程有意义。因此,通过执行MPI_Bcast(&mat_b,...)将指针从一个进程发送到另一个进程是错误的。如果在消息之后取消引用mat_b,则可以触发分段错误。可以改为发送缓冲区:

MPI_Bcast(mat_a[0],n*n, MPI_INT,0,MPI_COMM_WORLD);

mpicc main.c -o main -Wall编译并由mpirun -np 2 main运行的最小代码:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>


/* Minimal demo: rank 0 fills an n x n matrix stored as one contiguous
 * block (plus row pointers), broadcasts it, and rank 1 prints it. */
int main(int argc, char *argv[])
{
    int size, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);


    const int n = 42;

    /* One contiguous data block + a table of row pointers into it,
     * so mat_a[0] is a single buffer MPI can broadcast. */
    int **mat_a = malloc(n * sizeof(int *));
    if (mat_a == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }
    mat_a[0] = malloc(n * n * sizeof(int));
    if (mat_a[0] == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }
    for (int row = 1; row < n; row++) {
        mat_a[row] = mat_a[0] + row * n;
    }


    /* Only the root fills the matrix. */
    if (rank == 0) {
        for (int r = 0; r < n; r++) {
            for (int c = 0; c < n; c++) {
                mat_a[r][c] = r + c;
            }
        }
    }

    /* Broadcast the contiguous buffer, NOT the pointer table. */
    MPI_Bcast(mat_a[0], n * n, MPI_INT, 0, MPI_COMM_WORLD);

    /* Rank 1 prints what it received to show the broadcast worked. */
    if (rank == 1) {
        for (int r = 0; r < n; r++) {
            for (int c = 0; c < n; c++) {
                printf("%d ", mat_a[r][c]);
            }
            printf("\n");
        }
    }

    free(mat_a[0]);
    free(mat_a);

    MPI_Finalize();
    return 0;
}