MPI_Type_vector doesn't seem to receive/send what it should

Time: 2013-07-10 16:54:01

Tags: mpi

I have defined a square matrix of size grid_size and I work on its inner part (grid_size-2), while I Isend the rows and columns next to the outer edges to the other processes. I defined a toroidal topology, so each submatrix/process can easily compute its neighbours. While the rows (say [1][1] to [1][grid_size-2]) are sent correctly, the columns (say [1][1] to [grid_size-2][1]) are not — I use MPI_Type_contiguous for the rows and MPI_Type_vector for the columns. I checked with empty matrices (they are char matrices, so I initialize them to \0), and while the rows are always received as 0s, the columns differ at (semi)random positions. What am I missing?

typedef char bool;
typedef bool **grid_t;

/* create a toroidal topology */
void cart_create(MPI_Comm *new_comm, int Proc_Root) {
    int reorder = 1; /* allows processes reordered for efficiency */
    int periods[2], dim_size[2];
    dim_size[0] = Proc_Root; /* rows */
    dim_size[1] = Proc_Root; /* columns */
    periods[0] = 1; /* row periodic (each column forms a ring) */
    periods[1] = 1; /* columns periodic (each row forms a ring) */
    int comm_size;
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dim_size, periods, reorder, new_comm);
}

int main(int argc, char** argv) {

    /* ! MPI ! */
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int Num_of_Proc;
    MPI_Comm_size(MPI_COMM_WORLD, &Num_of_Proc);
    int Proc_Root = sqrt(Num_of_Proc);
    int Inner_Grid_Size = Num_of_Rows / Proc_Root; // size of each process's submatrix
    int Grid_Size = Inner_Grid_Size + 2; //grid size plus the ghost shells

    /* topology */
    MPI_Comm new_comm;
    cart_create(&new_comm, Proc_Root);

    /* allocate the grid */
    grid_t grid;
    create_grid(&grid, Grid_Size); // I fill it with 0
    grid_t grid2;
    create_empty_grid(&grid2, Grid_Size);
    grid_t new, old;

    bool *north_row = malloc(Inner_Grid_Size * sizeof *north_row);
    bool *south_row = malloc(Inner_Grid_Size * sizeof *south_row);
    bool *west_column = malloc(Inner_Grid_Size * sizeof *west_column);
    bool *east_column = malloc(Inner_Grid_Size * sizeof *east_column);
    // Works !
    MPI_Datatype rowtype;
    MPI_Type_contiguous(Inner_Grid_Size, MPI_CHAR, &rowtype); // MPI_C_BOOL
    MPI_Type_commit(&rowtype);
    // Where is the bug ?
    MPI_Datatype columntype;
    MPI_Type_vector(Inner_Grid_Size, 1, Grid_Size, MPI_CHAR, &columntype);
    MPI_Type_commit(&columntype);

    for (int k = 0; k < generations; k++) {
        if (k % 2) {
            old = grid2;
            new = grid;
        } else {
            old = grid;
            new = grid2;
        }
        MPI_Status status[16];
        MPI_Request reqs[16];
        MPI_Isend(&old[Inner_Grid_Size][1], 1, rowtype, neighboors_ranks[S],
                S, new_comm, &reqs[S]); //send to S
        MPI_Irecv(north_row, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[N],
                S, new_comm, &reqs[S + EIGHT]); //receive from N
        // above works
        // below not
        MPI_Isend(&old[1][1], 1, columntype, neighboors_ranks[W], W,
                new_comm, &reqs[W]); //send to W
        MPI_Irecv(east_column, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[E],
                W, new_comm, &reqs[W + EIGHT]); //receive from E
        MPI_Isend(&old[1][Inner_Grid_Size], 1, columntype, neighboors_ranks[E],
                E, new_comm, &reqs[E]); //send to E
        MPI_Irecv(west_column, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[W],
                E, new_comm, &reqs[E + EIGHT]); //receive from W

        MPI_Waitall(EIGHT, reqs + EIGHT, status + EIGHT); //Wait receives
        if (rank == root)
            for (int p = 0; p < Inner_Grid_Size; p++) {
                printf("east[%d]=%d\n", p, east_column[p]); // should be 0 !?
                //  printf("north,%d\n", north_row[p]); // prints ok
                printf("west[%d]=%d\n", p, west_column[p]); // should be 0 !?
            }
        //...
    }
}

Edit: allocation

void create_grid(grid_t *grid, int size) {
    srand(time(NULL) ^get_rank() << 16);
    if ((*grid = malloc(size * (sizeof **grid))) == NULL) return;
    for (int i = 0; i < size; ++i) {
        (*grid)[i] = malloc(size * (sizeof *((*grid)[i])));
        for (int j = 0; j < size; ++j) {
            (*grid)[i][j] = 0; /*was random */
        }
    }
}

/* the grid will be full of 0 */
void create_empty_grid(grid_t *grid, int size) {
    if ((*grid = malloc(size * (sizeof **grid))) == NULL) return;
    // the outer edges will be filled by the other processes
    for (int i = 0; i < size; ++i) {
        (*grid)[i] = malloc(size * (sizeof *((*grid)[i])));
        memset((*grid)[i], 0, sizeof (*(*grid)[i]) * size);
    }
}

void print_grid(grid_t grid, int start, int size) {
    for (int i = start; i < size; ++i) {
        for (int j = start; j < size; ++j) {
            if (grid[i][j]) {
                printf("@");
            } else {
                printf(".");
            }
        }
        printf("\n");
    }
    printf("\n");
}

1 answer:

Answer 0 (score: 1)

This comes up a lot when dealing with "multidimensional arrays" in C with MPI (see for example this question/answer and this one). It isn't an MPI thing, it's a C thing.

The standard way of allocating an array of arrays in C to get a multidimensional array does not give you one contiguous block of memory. Each row (that is, each malloc) is individually contiguous, but the next row can be anywhere else in memory.

So the formula of skipping Grid_Size items to find the next item in the column won't work (and, depending on the grid size, will probably segfault). As in the answers linked above, change the allocation to something like

data = malloc(size*size*sizeof(type)); 
grid = malloc(size*sizeof(type *)); 
for (int i=0; i<size; i++) 
    grid[i] = &(data[i*size]);

or any of the variants you'll see. This gives you one block of size*size values of your type, with the grid[] array pointing into it. Freeing is then done with
free(&(grid[0][0]));
free(grid);
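
As a minimal sketch of how that fits with the question's code (the helper names create_contiguous_grid / free_contiguous_grid and the error handling are mine, not from the original post), the grid could be allocated so that a stride of Grid_Size really lands on the next row of the same column:

#include <stdlib.h>

typedef char bool;
typedef bool **grid_t;

/* Allocate a size x size grid backed by one contiguous, zero-initialized
   block, with the row pointers pointing into that block. */
int create_contiguous_grid(grid_t *grid, int size) {
    bool *data = calloc((size_t) size * size, sizeof *data); /* one block */
    if (data == NULL) return -1;
    *grid = malloc(size * sizeof **grid);                    /* row pointers */
    if (*grid == NULL) { free(data); return -1; }
    for (int i = 0; i < size; ++i)
        (*grid)[i] = &data[i * size]; /* row i starts i*size chars into the block */
    return 0;
}

void free_contiguous_grid(grid_t grid) {
    free(&grid[0][0]); /* frees the data block */
    free(grid);        /* frees the row-pointer array */
}

With this layout, the question's MPI_Type_vector(Inner_Grid_Size, 1, Grid_Size, MPI_CHAR, &columntype) started at &old[1][1] really does pick out old[1][1], old[2][1], ..., old[Inner_Grid_Size][1], so the column Isend behaves just like the row one.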