find_steady_state Subroutine

public subroutine find_steady_state(steady_state, generation_number, global_input_board, global_nrows, global_ncols, base_mpi_communicator, nprocs)

Subroutine to find the steady state of the Game of Life. The global board is distributed over a 2D Cartesian grid of MPI ranks and evolved in parallel until every rank reports no change between successive generations, or until a maximum number of generations is reached.

Arguments

logical, intent(out) :: steady_state

Logical flag indicating whether the global board has reached a steady state

integer, intent(out) :: generation_number

The number of generations required to reach the steady state

integer, intent(in), dimension(:,:), allocatable :: global_input_board

The global board representing the current state of the game

integer, intent(in) :: global_nrows

The number of rows in the global board

integer, intent(in) :: global_ncols

The number of columns in the global board

integer, intent(in) :: base_mpi_communicator

The base MPI communicator for parallel processing

integer, intent(in) :: nprocs

The total number of processes in the MPI communicator


Calls

find_steady_state calls mpi_allreduce, mpi_barrier, mpi_cart_create, mpi_comm_rank, mpi_dims_create, mpi_isend, mpi_recv, mpi_send, check_for_steady_state, evolve_board, exchange_boundaries, and get_local_grid_info. In turn, exchange_boundaries calls mpi_isend and mpi_recv, and get_local_grid_info calls mpi_cart_coords and mpi_cart_shift.

Called by

find_steady_state is called by the program main.
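
A minimal calling sketch is shown below. It is hypothetical: the module name game_of_life_mod is an assumption (use whichever module actually hosts find_steady_state), and the real main program may build the board differently. It assumes the MPI Fortran bindings come from the mpi module and a process count compatible with the chosen board size (e.g. 4 ranks for an 8x8 board).

    program example_driver
        use mpi
        use game_of_life_mod, only: find_steady_state   ! assumed module name
        implicit none

        integer, dimension(:,:), allocatable :: board
        logical :: steady_state
        integer :: generation_number
        integer :: ierr, rank, nprocs
        integer, parameter :: nrows = 8, ncols = 8

        call MPI_Init(ierr)
        call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
        call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

        ! Only rank 0's board contents are scattered to the other ranks;
        ! every rank passes the global dimensions and the base communicator.
        allocate(board(nrows, ncols))
        board = 0
        if (rank == 0) board(4:5, 4:5) = 1   ! a 2x2 block is a still life, so a steady state is found quickly

        call find_steady_state(steady_state, generation_number, board, nrows, ncols, &
                               MPI_COMM_WORLD, nprocs)

        if (rank == 0) print *, 'steady state:', steady_state, 'after', generation_number, 'generations'

        call MPI_Finalize(ierr)
    end program example_driver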

Variables

integer, public :: row_start

Starting row of this rank's sub-board within the global board

integer, public :: col_start

Starting column of this rank's sub-board within the global board

type(DomainDecomposition), public :: domainDecomp

Domain decomposition (process grid dimensions and 2D Cartesian communicator)

integer, public :: coords_i(2)

MPI args used only by rank 0

integer, public :: neighbours_i(4)

MPI args used only by rank 0

integer, public :: row_start_i

Starting row of another rank's sub-board, received by rank 0 during the scatter

integer, public :: col_start_i

Starting column of another rank's sub-board, received by rank 0 during the scatter

integer, public :: local_nrows_i

Number of rows in another rank's sub-board, received by rank 0 during the scatter

integer, public :: local_ncols_i

Number of columns in another rank's sub-board, received by rank 0 during the scatter

real, public :: start_time

Timing

real, public :: end_time

Timing


Source Code

    subroutine find_steady_state(steady_state, generation_number, global_input_board, global_nrows, global_ncols, &
                                 base_mpi_communicator, nprocs)
        !> Logical flag indicating whether the global board has reached a steady state
        logical, intent(out) :: steady_state
        !> The number of generations required to reach the steady state
        integer, intent(out) :: generation_number
        !> The global board representing the current state of the game
        integer, dimension(:,:), allocatable, intent(in) :: global_input_board
        !> The number of rows in the global board
        integer, intent(in) :: global_nrows
        !> The number of columns in the global board
        integer, intent(in) :: global_ncols
        !> The base MPI communicator for parallel processing
        integer, intent(in) :: base_mpi_communicator
        !> The total number of processes in the MPI communicator
        integer, intent(in) :: nprocs

        !! Board args
        integer, parameter :: max_generations = 100
        integer :: local_nrows, local_ncols, nrows_per_rank, ncols_per_rank
        integer, dimension(:,:), allocatable :: global_board, local_current, local_new
        logical :: local_steady
        integer :: row_start, col_start

        !! MPI args
        integer :: ierr, rank, mpi_req
        integer :: coords(2)
        logical :: periods(2)
        type(DomainDecomposition) :: domainDecomp

        !! MPI args for rank 0 only
        integer :: coords_i(2), neighbours_i(4), row_start_i, col_start_i, local_nrows_i, local_ncols_i

        !! Timing
        real :: start_time, end_time

        !! Misc
        integer :: i, j

        local_steady = .false.
        steady_state = .false.

        ! Create 2D Cartesian topology
        domainDecomp%dims = 0
        call MPI_Dims_create(nprocs, 2, domainDecomp%dims, ierr)   ! Automatically split nprocs into a 2D (rows x columns) process grid
        periods = [ .false., .false. ]
        call MPI_Cart_create(base_mpi_communicator, 2, domainDecomp%dims, periods, .true., domainDecomp%communicator, ierr)
        call MPI_Comm_rank(domainDecomp%communicator, rank, ierr)

        call get_local_grid_info(domainDecomp, rank, global_nrows, global_ncols, nrows_per_rank, ncols_per_rank, coords, &
            row_start, col_start, local_nrows, local_ncols)

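        ! Local boards carry a one-cell halo (ghost layer) on every side, hence the +2 in each dimension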
        allocate(local_current(local_nrows+2, local_ncols+2))
        allocate(local_new(local_nrows+2, local_ncols+2))
        local_current = 0
        local_new = 0

        ! Scatter global board
        if (rank == 0) then
            allocate(global_board(size(global_input_board, 1), size(global_input_board, 2)))
            global_board = global_input_board
            do i = 1, nprocs - 1
                call MPI_RECV(col_start_i, 1, MPI_INTEGER, i, i*100, domainDecomp%communicator, MPI_STATUS_IGNORE, ierr)
                call MPI_RECV(row_start_i, 1, MPI_INTEGER, i, i*100 + 1, domainDecomp%communicator, MPI_STATUS_IGNORE, ierr)
                call MPI_RECV(local_ncols_i, 1, MPI_INTEGER, i, i*100 + 2, domainDecomp%communicator, MPI_STATUS_IGNORE, ierr)
                call MPI_RECV(local_nrows_i, 1, MPI_INTEGER, i, i*100 + 3, domainDecomp%communicator, MPI_STATUS_IGNORE, ierr)

                call MPI_Send(global_board(row_start_i:row_start_i+local_nrows_i-1, col_start_i:col_start_i+local_ncols_i-1), &
                    local_nrows_i*local_ncols_i, MPI_INTEGER, i, i*100 + 4, domainDecomp%communicator, ierr)
            end do

            local_current(2:local_nrows+1, 2:local_ncols+1) = global_board(1:local_nrows, 1:local_ncols)
        else
            call MPI_ISEND(col_start, 1, MPI_INTEGER, 0, rank*100, domainDecomp%communicator, mpi_req, ierr)
            call MPI_ISEND(row_start, 1, MPI_INTEGER, 0, rank*100 + 1, domainDecomp%communicator, mpi_req, ierr)
            call MPI_ISEND(local_ncols, 1, MPI_INTEGER, 0, rank*100 + 2, domainDecomp%communicator, mpi_req, ierr)
            call MPI_ISEND(local_nrows, 1, MPI_INTEGER, 0, rank*100 + 3, domainDecomp%communicator, mpi_req, ierr)

            call MPI_Recv(local_current(2:local_nrows+1, 2:local_ncols+1), local_nrows*local_ncols, MPI_INTEGER, &
                        0, rank*100 + 4, domainDecomp%communicator, MPI_STATUS_IGNORE, ierr)
        endif

        local_new = local_current

        call MPI_Barrier(domainDecomp%communicator, ierr)

        generation_number = 0
        local_steady = .false.

        do while (.not. local_steady .and. generation_number < max_generations)
            ! Exchange ghost cells with neighbors
            call exchange_boundaries(local_current, local_nrows, local_ncols, domainDecomp)

            ! Evolution
            call evolve_board(local_current, local_new)
            call check_for_steady_state(local_steady, local_current, local_new)

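            ! The board is globally steady only if every rank is locally steady (logical AND across all ranks)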
            call MPI_Allreduce(local_steady, steady_state, 1, MPI_LOGICAL, MPI_LAND, domainDecomp%communicator, ierr)
            local_steady = steady_state

            local_current = local_new

            generation_number = generation_number + 1
        end do
    end subroutine find_steady_state
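
The DomainDecomposition derived type is defined elsewhere in the module. Based on the components referenced above (%dims and %communicator), it might look roughly like the sketch below; the neighbours component is an assumption suggested by the mpi_cart_shift calls in get_local_grid_info, and the actual definition may differ.

    ! Hypothetical sketch of the DomainDecomposition type, inferred from its use
    ! in find_steady_state; not the project's actual definition.
    type :: DomainDecomposition
        integer :: dims(2)        ! number of ranks along each Cartesian dimension
        integer :: communicator   ! handle of the 2D Cartesian communicator
        integer :: neighbours(4)  ! up/down/left/right neighbour ranks (assumed)
    end type DomainDecomposition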