diff --git a/assemble/Adapt_State.F90 b/assemble/Adapt_State.F90 index 6082350542..a3cf83d546 100644 --- a/assemble/Adapt_State.F90 +++ b/assemble/Adapt_State.F90 @@ -28,6 +28,7 @@ #include "fdebug.h" module adapt_state_module + use iso_c_binding use spud use fldebug use global_parameters, only : OPTION_PATH_LEN, periodic_boundary_option_path, adaptivity_mesh_name, adaptivity_mesh_name, domain_bbox, topology_mesh_name, FIELD_NAME_LEN @@ -1063,9 +1064,12 @@ subroutine adapt_state_internal(states, metric, initialise_fields) type(detector_type), pointer :: detector => null() real :: global_min_quality, quality_tolerance + integer (c_int) :: MPI_COMM_SAVE ewrite(1, *) "In adapt_state_internal" + MPI_COMM_SAVE=MPI_COMM_NONEMPTY + nullify(node_ownership) max_adapt_iteration = adapt_iterations() @@ -1157,7 +1161,7 @@ subroutine adapt_state_internal(states, metric, initialise_fields) ! Generate a new mesh field based on the current mesh field and the input ! metric - if (.not. vertical_only) then + if (.not. vertical_only .and. node_count(old_positions)>0) then call adapt_mesh(old_positions, metric, new_positions, node_ownership = node_ownership, & & force_preserve_regions=initialise_fields) else @@ -1433,6 +1437,8 @@ subroutine adapt_state_internal(states, metric, initialise_fields) call compute_domain_statistics(states) end if + call free_communicator(MPI_COMM_SAVE) + ewrite(1, *) "Exiting adapt_state_internal" end subroutine adapt_state_internal diff --git a/assemble/Surface_Id_Interleaving.F90 b/assemble/Surface_Id_Interleaving.F90 index aafa02ac26..f7e40aa748 100644 --- a/assemble/Surface_Id_Interleaving.F90 +++ b/assemble/Surface_Id_Interleaving.F90 @@ -105,7 +105,7 @@ subroutine interleave_surface_ids_vector(mesh, interleaved_surface_ids, max_copl if(isparallel()) then #ifdef HAVE_MPI ! Max. coplanar_id must be global to ensure consistent global surface ids - call mpi_allreduce(max_coplanar_id, all_max_coplanar_id, 1, getpinteger(), MPI_MAX, MPI_COMM_FEMTOOLS, ierr) + call mpi_allreduce(max_coplanar_id, all_max_coplanar_id, 1, getpinteger(), MPI_MAX, MPI_COMM_NONEMPTY, ierr) assert(ierr == MPI_SUCCESS) max_coplanar_id = all_max_coplanar_id #endif diff --git a/assemble/Zoltan_callbacks.F90 b/assemble/Zoltan_callbacks.F90 index fa53670772..d310b7eb4c 100644 --- a/assemble/Zoltan_callbacks.F90 +++ b/assemble/Zoltan_callbacks.F90 @@ -11,7 +11,7 @@ module zoltan_callbacks use fldebug use data_structures use mpi_interfaces - use parallel_tools, only: getrank, getnprocs, getprocno, MPI_COMM_FEMTOOLS + use parallel_tools, only: getrank, getnprocs, getprocno, MPI_COMM_FEMTOOLS, MPI_COMM_NONEMPTY use sparse_tools use element_numbering use elements @@ -132,7 +132,7 @@ subroutine zoltan_cb_get_num_edges(data, num_gid_entries, num_lid_entries, num_o assert(num_gid_entries == 1) assert(num_lid_entries == 1) - count = zoltan_global_zz_halo%nowned_nodes + count = halo_nowned_nodes(zoltan_global_zz_halo) assert(count == num_obj) do node=1,count @@ -186,8 +186,8 @@ subroutine zoltan_cb_get_edge_list(data, num_gid_entries, num_lid_entries, num_o assert(num_gid_entries == 1) assert(num_lid_entries == 1) assert(wgt_dim == 1) - - count = zoltan_global_zz_halo%nowned_nodes + + count = halo_nowned_nodes(zoltan_global_zz_halo) assert(count == num_obj) my_num_edges = sum(num_edges(1:num_obj)) @@ -213,14 +213,14 @@ subroutine zoltan_cb_get_edge_list(data, num_gid_entries, num_lid_entries, num_o ! 
find global ids for each neighbour nbor_global_id(head:head+size(neighbours)-1) = halo_universal_number(zoltan_global_zz_halo, neighbours) ! find owning proc for each neighbour - nbor_procs(head:head+size(neighbours)-1) = halo_node_owners(zoltan_global_zz_halo, neighbours) - 1 + nbor_procs(head:head+size(neighbours)-1) = global_proc_no(halo_node_owners(zoltan_global_zz_halo, neighbours)) - 1 head = head + size(neighbours) end do ierr = ZOLTAN_OK return else call MPI_ALLREDUCE(my_num_edges,total_num_edges,1,MPI_INTEGER,MPI_SUM, & - MPI_COMM_FEMTOOLS,err) + MPI_COMM_NONEMPTY,err) end if head = 1 @@ -242,7 +242,7 @@ subroutine zoltan_cb_get_edge_list(data, num_gid_entries, num_lid_entries, num_o nbor_global_id(head:head+size(neighbours)-1) = halo_universal_number(zoltan_global_zz_halo, neighbours) ! find owning proc for each neighbour - nbor_procs(head:head+size(neighbours)-1) = halo_node_owners(zoltan_global_zz_halo, neighbours) - 1 + nbor_procs(head:head+size(neighbours)-1) = global_proc_no(halo_node_owners(zoltan_global_zz_halo, neighbours))-1 ! get elements associated with current node my_nelist => row_m_ptr(zoltan_global_zz_nelist, local_ids(node)) @@ -299,10 +299,10 @@ subroutine zoltan_cb_get_edge_list(data, num_gid_entries, num_lid_entries, num_o my_min_weight = minval(ewgts(1:head-1)) ! calculate global maximum edge weight - call MPI_ALLREDUCE(my_max_weight,max_weight,1,MPI_REAL,MPI_MAX, MPI_COMM_FEMTOOLS,err) + call MPI_ALLREDUCE(my_max_weight,max_weight,1,MPI_REAL,MPI_MAX, MPI_COMM_NONEMPTY,err) ! calculate global minimum edge weight - call MPI_ALLREDUCE(my_min_weight,min_weight,1,MPI_REAL,MPI_MIN, MPI_COMM_FEMTOOLS,err) + call MPI_ALLREDUCE(my_min_weight,min_weight,1,MPI_REAL,MPI_MIN, MPI_COMM_NONEMPTY,err) ! calculate the local 90th percentile edge weight ninety_weight = max_weight * 0.90 @@ -427,7 +427,7 @@ subroutine zoltan_cb_pack_nodes(data, num_gid_entries, num_lid_entries, num_ids, buf(head:head+row_length(zoltan_global_zz_sparsity_two, node)-1) = halo_universal_number(zoltan_global_zz_halo, row_m_ptr(zoltan_global_zz_sparsity_two, node)) head = head + row_length(zoltan_global_zz_sparsity_two, node) - buf(head:head+row_length(zoltan_global_zz_sparsity_two, node)-1) = halo_node_owners(zoltan_global_zz_halo, row_m_ptr(zoltan_global_zz_sparsity_two, node)) + buf(head:head+row_length(zoltan_global_zz_sparsity_two, node)-1) = global_proc_no(halo_node_owners(zoltan_global_zz_halo, row_m_ptr(zoltan_global_zz_sparsity_two, node))) head = head + row_length(zoltan_global_zz_sparsity_two, node) buf(head) = row_length(zoltan_global_zz_nelist, node) @@ -509,7 +509,7 @@ subroutine zoltan_cb_unpack_nodes(data, num_gid_entries, num_ids, global_ids, si universal_number = halo_universal_number(zoltan_global_zz_halo, neighbours(j)) call insert(zoltan_global_new_nodes, universal_number, changed=changed) if (changed) then ! 
so it is a halo node - old_owner = halo_node_owner(zoltan_global_zz_halo, neighbours(j)) - 1 + old_owner = global_proc_no(halo_node_owner(zoltan_global_zz_halo, neighbours(j))) - 1 assert(old_owner < getnprocs()) if (old_owner == rank) then call insert(halo_nodes_we_currently_own, neighbours(j)) diff --git a/assemble/Zoltan_global_variables.F90 b/assemble/Zoltan_global_variables.F90 index f7486b65d0..b3f0271aa9 100644 --- a/assemble/Zoltan_global_variables.F90 +++ b/assemble/Zoltan_global_variables.F90 @@ -39,6 +39,7 @@ module zoltan_global_variables type(scalar_field), save, pointer, public :: zoltan_global_max_edge_weight_on_node logical, save, public :: zoltan_global_output_edge_weights = .false. type(csr_sparsity), save, pointer, public :: zoltan_global_zz_nelist + integer, dimension(:), save, public, allocatable :: global_proc_no ! Needed for zoltan_cb_pack_node_sizes ! - added vector_field to use fields diff --git a/assemble/Zoltan_integration.F90 b/assemble/Zoltan_integration.F90 index 661230f51b..6cbaae38ab 100644 --- a/assemble/Zoltan_integration.F90 +++ b/assemble/Zoltan_integration.F90 @@ -5,6 +5,7 @@ module zoltan_integration #ifdef HAVE_ZOLTAN use spud + use iso_c_binding use fldebug use global_parameters, only: real_size, OPTION_PATH_LEN, topology_mesh_name,& FIELD_NAME_LEN @@ -101,7 +102,7 @@ subroutine zoltan_drive(states, final_adapt_iteration, global_min_quality, metri real :: load_imbalance_tolerance logical :: flredecomp real :: minimum_quality - integer :: flredecomp_input_procs = -1, flredecomp_target_procs = -1 + integer :: flredecomp_input_procs = -1, flredecomp_target_procs = -1, ierr ewrite(1,*) "In zoltan_drive" @@ -140,6 +141,13 @@ subroutine zoltan_drive(states, final_adapt_iteration, global_min_quality, metri zoltan_global_field_weighted_partitions = & have_option(trim(zoltan_global_base_option_path) // "/field_weighted_partitions") + if (.not. allocated(global_proc_no)) then + allocate(global_proc_no(getnprocs(MPI_COMM_NONEMPTY))) + call MPI_ALLGATHER(getprocno(MPI_COMM_WORLD),1,MPI_INTEGER, & + global_proc_no,1,MPI_INTEGER, & + MPI_COMM_NONEMPTY,ierr) + end if + call setup_module_variables(states, final_adapt_iteration, zz, flredecomp) call setup_quality_module_variables(metric, minimum_quality) ! this needs to be called after setup_module_variables @@ -222,7 +230,8 @@ subroutine zoltan_drive(states, final_adapt_iteration, global_min_quality, metri call reconstruct_enlist call reconstruct_senlist - call reconstruct_halo(zz) + call reconstruct_halo(zz, flredecomp, flredecomp_input_procs, & + flredecomp_target_procs) if(zoltan_global_migrate_extruded_mesh) then zoltan_global_new_positions_m1d = zoltan_global_new_positions ! save a reference to the horizontal mesh you've just load balanced @@ -265,7 +274,8 @@ subroutine zoltan_drive(states, final_adapt_iteration, global_min_quality, metri call reconstruct_enlist call reconstruct_senlist - call reconstruct_halo(zz) + call reconstruct_halo(zz, flredecomp, flredecomp_input_procs, & + flredecomp_target_procs) if (.not. 
verify_consistent_local_element_numbering(zoltan_global_new_positions%mesh) ) then
       ewrite(-1,*) "For the extruded mesh, the local element numbering of elements in the halo region" // &
@@ -367,7 +377,7 @@ subroutine setup_module_variables(states, final_adapt_iteration, zz, flredecomp,

     zoltan_global_zz_nelist => extract_nelist(zoltan_global_zz_mesh)

-    zz => Zoltan_Create(halo_communicator(zoltan_global_zz_mesh))
+    zz => Zoltan_Create(MPI_COMM_FEMTOOLS)

     nhalos = halo_count(zoltan_global_zz_mesh)
     assert(nhalos == 2)
@@ -395,7 +405,7 @@ subroutine setup_module_variables(states, final_adapt_iteration, zz, flredecomp,
       call insert(zoltan_global_old_local_numbering_to_uen, i, halo_universal_number(zoltan_global_zz_ele_halo, i))
     end do

-    allocate(zoltan_global_receives(halo_proc_count(zoltan_global_zz_halo)))
+    allocate(zoltan_global_receives(getnprocs(MPI_COMM_FEMTOOLS)))
     do i=1,size(zoltan_global_receives)
       call allocate(zoltan_global_receives(i))
     end do
@@ -858,6 +868,7 @@ subroutine zoltan_load_balance(zz, changes, num_gid_entries, num_lid_entries, &
    integer :: min_num_nodes_after_balance, total_num_nodes_before_balance, total_num_nodes_after_balance
    integer :: num_empty_partitions, empty_partition
    character (len = 10) :: string_load_imbalance_tolerance
+   logical :: flag

    ewrite(1,*) 'in zoltan_load_balance'

@@ -927,10 +938,16 @@ subroutine zoltan_load_balance(zz, changes, num_gid_entries, num_lid_entries, &
    else
       min_num_nodes_after_balance = 0

-      do while (min_num_nodes_after_balance == 0)
-
+      flag = .true.
+      do while (min_num_nodes_after_balance == 0 .and. flag)
+
-      ierr = Zoltan_LB_Balance(zz, changes, num_gid_entries, num_lid_entries, p1_num_import, p1_import_global_ids, &
-           & p1_import_local_ids, p1_import_procs, p1_num_export, p1_export_global_ids, p1_export_local_ids, p1_export_procs)
+      ierr = Zoltan_LB_Partition(zz, changes, num_gid_entries, num_lid_entries, p1_num_import, p1_import_global_ids, &
+           & p1_import_local_ids, p1_import_procs, import_to_part, p1_num_export, p1_export_global_ids, &
+           & p1_export_local_ids, p1_export_procs, export_to_part)
      assert(ierr == ZOLTAN_OK)

      ! calculate how many owned nodes we'd have after doing the planned load balancing
@@ -967,7 +984,8 @@ subroutine zoltan_load_balance(zz, changes, num_gid_entries, num_lid_entries, &
      assert(ierr == MPI_SUCCESS)

      if (min_num_nodes_after_balance == 0) then
-        FLAbort("Could not stop Zoltan creating empty partitions.")
+        ewrite(-1,*) "Could not stop Zoltan creating empty partitions."
+        flag = .false.
      else
         ewrite(-1,*) 'Load balancing was carried out without edge-weighting being applied. Mesh may not be of expected quality.'
      end if
@@ -1200,7 +1218,7 @@ subroutine deal_with_exporters
        universal_number = halo_universal_number(zoltan_global_zz_halo, neighbours(j))
        call insert(zoltan_global_new_nodes, universal_number, changed=changed)
        if (changed) then
-          old_owner = halo_node_owner(zoltan_global_zz_halo, neighbours(j)) - 1
+          old_owner = global_proc_no(halo_node_owner(zoltan_global_zz_halo, neighbours(j))) - 1
          if (old_owner == rank) then
            call insert(halo_nodes_we_currently_own, neighbours(j))
          else
@@ -1623,7 +1641,7 @@ subroutine reconstruct_senlist
    ewrite(1,*) "Exiting reconstruct_senlist"
  end subroutine reconstruct_senlist

-  subroutine reconstruct_halo(zz)
+  subroutine reconstruct_halo(zz, flredecomp, input_procs, target_procs)
    ! At this point, the receives sets have been populated with all
    ! the universal node numbers we need to receive from each process.
    ! So, we are going to use zoltan to invert this to compute
@@ -1633,22 +1651,28 @@ subroutine reconstruct_halo(zz)
    ! l2e halo.
    ! Supply the peeps with jeeps, brick apiece, capiche?
-    type(zoltan_struct), pointer, intent(in) :: zz
+    type(zoltan_struct), pointer, intent(in) :: zz
+    logical, intent(in) :: flredecomp
+    integer, intent(in) :: input_procs, target_procs

-    integer :: num_import, num_export
+    integer :: num_import, num_export, nprocs
    integer, dimension(:), pointer :: import_global_ids, import_local_ids, import_procs
    integer, dimension(:), pointer :: export_global_ids, export_local_ids, export_procs, export_to_part
-    integer :: ierr, i, head
+    integer :: ierr, i, j, head
    type(integer_set), dimension(size(zoltan_global_receives)) :: sends
    integer, dimension(size(zoltan_global_receives)) :: nreceives, nsends
+    integer, dimension(:), allocatable :: nonempty
    integer, dimension(ele_count(zoltan_global_new_positions)) :: ele_renumber_permutation
    integer, dimension(node_count(zoltan_global_new_positions)) :: node_renumber_permutation
    integer :: universal_element_number, old_new_local_element_number, new_new_local_element_number
    integer :: universal_node_number, old_new_local_node_number, new_new_local_node_number
    integer, dimension(ele_count(zoltan_global_new_positions)) :: old_new_region_ids
-
+    integer (c_int) :: comm
+    ewrite(1,*) "In reconstruct_halo"
+
+    allocate(nonempty(getnprocs(MPI_COMM_FEMTOOLS)))

    num_import = 0
    do i=1,size(zoltan_global_receives)
@@ -1706,19 +1730,46 @@ subroutine reconstruct_halo(zz)

    ! Allocate the halo and such
    ! We had to grow dreads to change our description, two cops is on a milkbox, missin'
-
+    call reset_next_mpi_tag()
    allocate(zoltan_global_new_positions%mesh%halos(2))
+    if (allocated(global_proc_no)) deallocate(global_proc_no)
+
+    if (flredecomp .and. input_procs>target_procs) then
+      allocate(global_proc_no(input_procs))
+      global_proc_no = [(i,i=1,input_procs)]
+      nprocs = input_procs
+      comm = MPI_COMM_FEMTOOLS
+      nonempty = 1
+      ! every rank stays in MPI_COMM_NONEMPTY here, so empty partitions need no special handling
+      call split_communicator(MPI_COMM_FEMTOOLS, MPI_COMM_NONEMPTY, .true.)
+    else
+      call split_communicator(MPI_COMM_FEMTOOLS, MPI_COMM_NONEMPTY, &
+           node_count(zoltan_global_new_positions)>0)
+      allocate(global_proc_no(getnprocs(MPI_COMM_NONEMPTY)))
+      call MPI_ALLGATHER(getprocno(MPI_COMM_WORLD),1,MPI_INTEGER, &
+           global_proc_no,1,MPI_INTEGER, &
+           MPI_COMM_NONEMPTY,ierr)
+      nprocs = getnprocs(MPI_COMM_NONEMPTY)
+      comm = MPI_COMM_NONEMPTY
+      call MPI_Allgather(merge(1,0,node_count(zoltan_global_new_positions)>0), 1, MPI_INTEGER, &
+           nonempty, 1, MPI_INTEGER, MPI_COMM_FEMTOOLS, ierr)
+    end if
+
    call allocate(zoltan_global_new_positions%mesh%halos(2), &
-                 nsends = nsends, &
-                 nreceives = nreceives, &
+                 nsends = pack(nsends, nonempty == nonempty(getprocno(MPI_COMM_FEMTOOLS))), &
+                 nreceives = pack(nreceives, nonempty == nonempty(getprocno(MPI_COMM_FEMTOOLS))), &
+                 nprocs = nprocs, &
                  name = halo_name(zoltan_global_zz_halo), &
-                 communicator = halo_communicator(zoltan_global_zz_halo), &
+                 communicator = comm, &
                  nowned_nodes = key_count(zoltan_global_new_nodes) - num_import, &
                  data_type = halo_data_type(zoltan_global_zz_halo))

+    j=1
    do i=1,size(zoltan_global_receives)
-      call set_halo_sends(zoltan_global_new_positions%mesh%halos(2), i, fetch(zoltan_global_universal_to_new_local_numbering, set2vector(sends(i))))
-      call set_halo_receives(zoltan_global_new_positions%mesh%halos(2), i, fetch(zoltan_global_universal_to_new_local_numbering, set2vector(zoltan_global_receives(i))))
+      if (nonempty(i) == 0) cycle
+      call set_halo_sends(zoltan_global_new_positions%mesh%halos(2), j, fetch(zoltan_global_universal_to_new_local_numbering, set2vector(sends(i))))
+      call set_halo_receives(zoltan_global_new_positions%mesh%halos(2), j, fetch(zoltan_global_universal_to_new_local_numbering, set2vector(zoltan_global_receives(i))))
+      j=j+1
    end do

    ! Now derive all the other halos ...
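Aside on the communicator handling in this hunk: split_communicator (added to Parallel_Tools.F90 further down) wraps MPI_Comm_split, so every rank, empty or not, ends up in some sub-communicator, and global_proc_no then records, for each rank of MPI_COMM_NONEMPTY, its 1-based process number in the original communicator. A minimal, self-contained sketch of the same pattern, assuming only standard MPI; the name has_nodes and the odd/even test are illustrative stand-ins for node_count(zoltan_global_new_positions) > 0 (note the patch stores the 1-based getprocno rather than the raw rank):

    program nonempty_split
      use mpi
      implicit none
      integer :: ierr, world_rank, comm_nonempty, nonempty_size, color
      logical :: has_nodes
      integer, dimension(:), allocatable :: world_rank_of

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, world_rank, ierr)

      ! Stand-in for "this partition still owns nodes": pretend odd ranks are empty.
      has_nodes = mod(world_rank, 2) == 0
      color = merge(1, 0, has_nodes)

      ! Every rank lands in some sub-communicator; keying on the world rank
      ! preserves the original relative ordering within each group.
      call MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, comm_nonempty, ierr)

      if (has_nodes) then
        call MPI_Comm_size(comm_nonempty, nonempty_size, ierr)
        allocate(world_rank_of(nonempty_size))
        ! Build the rank-translation table, as global_proc_no does above: entry i
        ! is the world rank of process i of the non-empty communicator.
        call MPI_Allgather(world_rank, 1, MPI_INTEGER, &
                           world_rank_of, 1, MPI_INTEGER, comm_nonempty, ierr)
      end if

      call MPI_Comm_free(comm_nonempty, ierr)
      call MPI_Finalize(ierr)
    end program nonempty_split

Because the key argument to MPI_Comm_split is the world rank, the relative ordering of processes is preserved within each group, which is what lets a single allgather build the rank-translation table.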
@@ -1772,6 +1823,7 @@ subroutine reconstruct_halo(zz) end do call reorder_element_numbering(zoltan_global_new_positions) + ewrite(1,*) "Exiting reconstruct_halo" @@ -2040,7 +2092,7 @@ subroutine transfer_fields(zz) integer :: old_ele integer, dimension(:), pointer :: old_local_nodes, nodes type(element_type), pointer :: eshape - type(integer_set), dimension(halo_proc_count(zoltan_global_zz_halo)) :: sends + type(integer_set), dimension(:), allocatable :: sends integer :: i, j, new_owner, universal_element_number type(integer_set) :: self_sends integer :: num_import, num_export @@ -2061,6 +2113,8 @@ subroutine transfer_fields(zz) type(detector_type), pointer :: detector => null(), add_detector => null() ewrite(1,*) 'in transfer_fields' + + allocate(sends(getnprocs(MPI_COMM_FEMTOOLS))) do i=1,size(sends) call allocate(sends(i)) diff --git a/diagnostics/Parallel_Diagnostics.F90 b/diagnostics/Parallel_Diagnostics.F90 index 880fa7a716..e87962c286 100644 --- a/diagnostics/Parallel_Diagnostics.F90 +++ b/diagnostics/Parallel_Diagnostics.F90 @@ -58,7 +58,7 @@ subroutine calculate_element_halo(s_field) integer :: i, j type(element_type), pointer :: shape - assert(ele_count(s_field) > 0) + assert(ele_count(s_field) >= 0) shape => ele_shape(s_field, 1) if(shape%degree /= 0) then FLExit("element_halo diagnostic requires a degree 0 mesh") @@ -81,7 +81,7 @@ subroutine calculate_element_ownership(s_field) type(element_type), pointer :: shape type(halo_type), pointer :: ele_halo - assert(ele_count(s_field) > 0) + assert(ele_count(s_field) >= 0) shape => ele_shape(s_field, 1) if(shape%degree /= 0) then FLExit("element_halo_ownership diagnostic requires a degree 0 mesh") @@ -107,7 +107,7 @@ subroutine calculate_element_universal_numbering(s_field) type(element_type), pointer :: shape type(halo_type), pointer :: ele_halo - assert(ele_count(s_field) > 0) + assert(ele_count(s_field) >= 0) shape => ele_shape(s_field, 1) if(shape%degree /= 0) then FLExit("element_universal_numbering diagnostic requires a degree 0 mesh") diff --git a/femtools/Checkpoint.F90 b/femtools/Checkpoint.F90 index b6d60a0a28..f04c7f3ef8 100644 --- a/femtools/Checkpoint.F90 +++ b/femtools/Checkpoint.F90 @@ -514,11 +514,11 @@ subroutine checkpoint_meshes(state, prefix, postfix, cp_no, keep_initial_data, n position => extract_vector_field(state(1), trim(mesh%name)//"Coordinate") end if - if(nparts > 1) then + if(getnprocs() > 1) then call write_mesh_files(parallel_filename(mesh_filename), mesh_format, position, number_of_partitions=number_of_partitions) ! Write out the halos ewrite(2, *) "Checkpointing halos" - call write_halos(mesh_filename, mesh, number_of_partitions=number_of_partitions) + call write_halos(mesh_filename, mesh, number_of_partitions=getnprocs(MPI_COMM_NONEMPTY)) else ! Write out the mesh call write_mesh_files(mesh_filename, mesh_format, position, number_of_partitions=number_of_partitions) diff --git a/femtools/Global_Numbering.F90 b/femtools/Global_Numbering.F90 index 2a50f27388..935025468d 100644 --- a/femtools/Global_Numbering.F90 +++ b/femtools/Global_Numbering.F90 @@ -115,6 +115,7 @@ subroutine make_halo_dg(element, element_halo, new_halo) !! Query what is the naming convention for halos. 
name=trim(halo_name(element_halo)) // "DG", &
         nprocs=element_halo%nprocs, &
+        communicator=halo_communicator(element_halo), &
         nowned_nodes=element_halo%nowned_nodes*element%loc, &
         data_type=HALO_TYPE_DG_NODE, &
         ordering_scheme=halo_ordering_scheme(element_halo))
diff --git a/femtools/Halos_Communications.F90 b/femtools/Halos_Communications.F90
index d8da5e257e..34e67491bc 100644
--- a/femtools/Halos_Communications.F90
+++ b/femtools/Halos_Communications.F90
@@ -795,7 +795,7 @@ function halo_verifies_array_real(halo, real_array) result(verifies)
    call halo_update(halo, lreal_array)

    epsl = spacing( maxval( abs( lreal_array ))) * 10000.
-    call allmax(epsl)
+    call allmax(epsl, halo_communicator(halo))
    verifies = all(abs(real_array - lreal_array) < epsl )

#ifdef DDEBUG
diff --git a/femtools/Halos_Derivation.F90 b/femtools/Halos_Derivation.F90
index 14685c4ae1..98a385a7d6 100644
--- a/femtools/Halos_Derivation.F90
+++ b/femtools/Halos_Derivation.F90
@@ -1493,7 +1493,66 @@ function expand_mesh_halo(mesh) result (new_mesh)
    nhalos = size(mesh%halos)
    old_halo => mesh%halos(nhalos)
    ele_halo => mesh%element_halos(size(mesh%element_halos))
-    new_halo = expand_halo(mesh, old_halo)
+
+    has_surface_mesh = .false.
+    if (associated(mesh%faces)) then
+      if (associated(mesh%faces%boundary_ids)) then
+        has_surface_mesh = .true.
+      end if
+    end if
+
+    if (halo_proc_count(old_halo) == 1) then
+      call allocate(new_mesh, node_count(mesh), element_count(mesh), &
+        mesh%shape, name=mesh%name)
+      nhalos = size(mesh%halos)
+      allocate(new_mesh%halos(nhalos+1))
+      do i=1, nhalos
+        new_mesh%halos(i) = mesh%halos(i)
+        call incref(new_mesh%halos(i))
+      end do
+      new_mesh%halos(nhalos+1) = mesh%halos(nhalos)
+      call incref(new_mesh%halos(nhalos+1))
+      new_mesh%option_path = mesh%option_path
+      do ele=1, element_count(mesh)
+        call set_ele_nodes(new_mesh, ele, ele_nodes(mesh, ele))
+      end do
+
+      if (has_surface_mesh) then
+        snloc = mesh%faces%shape%loc
+        allocate(sndgln(1:surface_element_count(mesh)*snloc))
+        allocate(boundary_ids(1:surface_element_count(mesh)))
+        if (has_discontinuous_internal_boundaries(mesh)) then
+          allocate(element_owners(1:size(boundary_ids)))
+        end if
+        ! first copy the facets of the trusted (see above) existing surface mesh
+        do i=1, unique_surface_element_count(mesh)
+          ele = face_ele(mesh, i)
+          sndgln((i-1)*snloc+1:i*snloc) = face_global_nodes(mesh, i)
+          boundary_ids(i) = surface_element_id(mesh, i)
+          if (has_discontinuous_internal_boundaries(mesh)) then
+            element_owners(i) = ele
+          end if
+        end do
+        if (has_discontinuous_internal_boundaries(mesh)) then
+          call add_faces(new_mesh, sndgln=sndgln, boundary_ids=boundary_ids, element_owner=element_owners)
+        else
+          call add_faces(new_mesh, sndgln=sndgln, boundary_ids=boundary_ids, &
+            allow_duplicate_internal_facets=.true.)
+        end if
+        allocate(new_mesh%element_halos(nhalos+1))
+        call derive_element_halo_from_node_halo(new_mesh, &
+          & ordering_scheme = HALO_ORDER_TRAILING_RECEIVES, create_caches = .true.)
+
+        if (associated(mesh%region_ids)) then
+          ! with a trivial halo we can simply copy the region ids
+          allocate(new_mesh%region_ids(1:element_count(mesh)))
+          new_mesh%region_ids(1:element_count(mesh)) = mesh%region_ids
+        end if
+      end if
+      return
+    end if
+
+    new_halo = expand_halo(mesh, old_halo)

    ! map from universal node id to global node id
    call create_global_to_universal_numbering(new_halo)
@@ -1515,13 +1576,6 @@ function expand_mesh_halo(mesh) result (new_mesh)
    !
if associated(mesh%faces): 3) the surface ids of adjacent facets ele_info_size = 1+nloc - has_surface_mesh = .false. - if (associated(mesh%faces)) then - if (associated(mesh%faces%boundary_ids)) then - has_surface_mesh = .true. - end if - end if - if (has_surface_mesh) then snloc = mesh%faces%shape%loc ele_info_size = ele_info_size+nfaces diff --git a/femtools/Halos_IO.cpp b/femtools/Halos_IO.cpp index 0ef666cde0..9865b0f938 100644 --- a/femtools/Halos_IO.cpp +++ b/femtools/Halos_IO.cpp @@ -72,9 +72,6 @@ HaloReadError Fluidity::ReadHalos(const string& filename, int& process, int& npr return HALO_READ_FILE_INVALID; } nprocs = atoi(charBuffer); - if(process >= nprocs){ - return HALO_READ_FILE_INVALID; - } // Extract halo data for each process for each level npnodes.clear(); @@ -176,7 +173,6 @@ HaloReadError Fluidity::ReadHalos(const string& filename, int& process, int& npr int Fluidity::WriteHalos(const string& filename, const unsigned int& process, const unsigned int& nprocs, const map& npnodes, const map > >& send, const map > >& recv){ #ifdef DDEBUG // Input check - assert(process < nprocs); assert(send.size() == recv.size()); for(map > >::const_iterator sendIter = send.begin(), recvIter = recv.begin();sendIter != send.end() and recvIter != recv.end(), recvIter != recv.end();sendIter++, recvIter++){ assert(recv.count(sendIter->first) != 0); diff --git a/femtools/Halos_Registration.F90 b/femtools/Halos_Registration.F90 index 322b8b1523..7b5e9fa768 100644 --- a/femtools/Halos_Registration.F90 +++ b/femtools/Halos_Registration.F90 @@ -140,10 +140,10 @@ subroutine read_halos_mesh(filename, mesh, communicator) if(present(communicator)) then lcommunicator = communicator else - lcommunicator = MPI_COMM_FEMTOOLS + lcommunicator = MPI_COMM_NONEMPTY end if - procno = getprocno(communicator = lcommunicator) + procno = getprocno(communicator = MPI_COMM_FEMTOOLS) nprocs = getnprocs(communicator = lcommunicator) error_count = chalo_reader_set_input(filename, len_trim(filename), procno - 1, nprocs) @@ -259,7 +259,7 @@ subroutine write_halos(filename, mesh, number_of_partitions) if(nhalos == 0) return communicator = halo_communicator(mesh%halos(nhalos)) - procno = getprocno(communicator = communicator) + procno = getprocno(communicator = MPI_COMM_FEMTOOLS) if (present(number_of_partitions)) then nparts = number_of_partitions @@ -267,7 +267,7 @@ subroutine write_halos(filename, mesh, number_of_partitions) nparts = getnprocs() end if - if(procno <= nparts) then + if(procno <= getnprocs()) then nprocs = getnprocs(communicator = communicator) call chalo_writer_initialise(procno - 1, nparts) @@ -293,7 +293,7 @@ subroutine write_halos(filename, mesh, number_of_partitions) error_count = 0 end if - call allsum(error_count, communicator = communicator) + call allsum(error_count, communicator = MPI_COMM_FEMTOOLS) if(error_count > 0) then FLExit("Unable to write halos with name " // trim(filename)) end if diff --git a/femtools/Interpolation.F90 b/femtools/Interpolation.F90 index 2a741891a1..105bcbacd4 100644 --- a/femtools/Interpolation.F90 +++ b/femtools/Interpolation.F90 @@ -48,6 +48,8 @@ function get_element_mapping(old_position, new_position, different_domains, only integer, dimension(node_count(new_position)) :: map integer :: i + if (size(map) == 0) return + ! Thanks, James! 
call find_node_ownership(old_position, new_position, map)
diff --git a/femtools/Parallel_Tools.F90 b/femtools/Parallel_Tools.F90
index 1403db722c..cbfe3915e1 100644
--- a/femtools/Parallel_Tools.F90
+++ b/femtools/Parallel_Tools.F90
@@ -45,9 +45,13 @@ module parallel_tools
       getnprocs, getpinteger, getpreal, getprocno, getrank, &
       isparallel, parallel_filename, parallel_filename_len, &
       pending_communication, valid_communicator, next_mpi_tag, &
-       MPI_COMM_FEMTOOLS, set_communicator
+       MPI_COMM_FEMTOOLS, set_communicator, split_communicator, &
+       free_communicator, reset_next_mpi_tag, MPI_COMM_NONEMPTY

  integer(c_int), bind(c) :: MPI_COMM_FEMTOOLS = MPI_COMM_WORLD
+  integer(c_int), bind(c), save :: MPI_COMM_NONEMPTY = MPI_COMM_WORLD
+
+  integer, save :: last_tag = 0

  interface allmax
    module procedure allmax_integer, allmax_real
@@ -78,9 +82,13 @@ module parallel_tools

contains

+  subroutine reset_next_mpi_tag()
+    call allmax(last_tag)
+  end subroutine reset_next_mpi_tag
+
  integer function next_mpi_tag()
#ifdef HAVE_MPI
-    integer, save::last_tag=0, tag_ub=0
+    integer, save::tag_ub=0
    integer flag, ierr
    if(tag_ub==0) then
       call MPI_Attr_get(MPI_COMM_FEMTOOLS, MPI_TAG_UB, tag_ub, flag, ierr)
@@ -190,7 +198,7 @@ end function getnprocs
  logical function isparallel()
    !!< Return true if we are running in parallel, and false otherwise.

-    isparallel = (getnprocs()>1)
+    isparallel = (getnprocs(MPI_COMM_FEMTOOLS)>1)

  end function isparallel

@@ -960,4 +968,32 @@ subroutine set_communicator(communicator) bind(c)

  end subroutine set_communicator

+  subroutine split_communicator(old_communicator, new_communicator, key)
+    integer(c_int), intent(in) :: old_communicator
+    integer(c_int), intent(out) :: new_communicator
+    logical, intent(in) :: key
+
+    integer :: ierr, ikey
+
+    if (key) then
+      ikey = 1
+    else
+      ikey = 0
+    end if
+
+    call MPI_Comm_split(old_communicator, ikey, getrank(MPI_COMM_WORLD), new_communicator, ierr)
+
+  end subroutine split_communicator
+
+  subroutine free_communicator(communicator)
+    integer(c_int), intent(inout) :: communicator
+    integer :: ierr
+
+    if ((communicator .ge. 0) .and. communicator .ne. MPI_COMM_WORLD) then
+      call MPI_Comm_free(communicator, ierr)
+      communicator = -1
+    end if
+
+  end subroutine free_communicator
+
 end module parallel_tools
diff --git a/femtools/Read_GMSH.F90 b/femtools/Read_GMSH.F90
index 928d02655b..0a0953e627 100644
--- a/femtools/Read_GMSH.F90
+++ b/femtools/Read_GMSH.F90
@@ -117,8 +117,16 @@ function read_gmsh_simple( filename, quad_degree, &
    call read_nodes_coords( fd, lfilename, gmshFormat, nodes )

    ! Read in elements
+    dim = 0
    call read_faces_and_elements( fd, lfilename, gmshFormat, &
         elements, faces, dim)
+    if (dim == 0) then
+      if (present(mdim)) then
+        dim = mdim
+      else
+        FLExit("Unable to ascertain mesh dimension")
+      end if
+    end if

    call read_node_column_IDs( fd, lfilename, gmshFormat, nodes )

@@ -174,9 +182,7 @@ function read_gmsh_simple( filename, quad_degree, &

    end if

-    if (present(mdim)) then
-      coordinate_dim = mdim
-    else if(have_option("/geometry/spherical_earth") ) then
+    if(have_option("/geometry/spherical_earth") ) then
      ! on the n-sphere the input mesh may be 1/2d (extrusion), or 3d but
      ! Coordinate is always geometry dimensional
      call get_option('/geometry/dimension', gdim)
@@ -185,7 +191,13 @@ function read_gmsh_simple( filename, quad_degree, &
      coordinate_dim = dim
    end if

-    loc = size( elements(1)%nodeIDs )
+    if (size(elements)>0) then
+      loc = size( elements(1)%nodeIDs )
+    elseif (present(mdim)) then
+      loc = mdim+1
+    else
+      FLExit("Unable to determine element size: mesh file has no elements and no dimension was supplied")
+    end if
    if (numFaces>0) then
       sloc = size( faces(1)%nodeIDs )
    else
@@ -214,7 +226,9 @@ function read_gmsh_simple( filename, quad_degree, &
    if (haveRegionIDs) then
       allocate( field%mesh%region_ids(numElements) )
    end if
-    if(nodes(1)%columnID>=0) allocate(field%mesh%columns(1:numNodes))
+    if (size(nodes)>0) then
+      if(nodes(1)%columnID>=0) allocate(field%mesh%columns(1:numNodes))
+    end if

    ! Loop round nodes copying across coords and column IDs to field mesh,
    ! if they exist
@@ -364,7 +378,7 @@ subroutine read_nodes_coords( fd, filename, gmshFormat, nodes )

    read(fd, *) numNodes

-    if(numNodes < 2) then
+    if(numNodes > 0 .and. numNodes < 2) then
       FLExit("Error: GMSH number of nodes field < 2")
    end if

@@ -387,7 +401,7 @@ subroutine read_nodes_coords( fd, filename, gmshFormat, nodes )

    ! Skip newline character when in binary mode
    if( gmshFormat == binaryFormat ) then
-      read(fd) newlineChar
+      if(numNodes>0) read(fd) newlineChar
       call ascii_formatting(fd, filename, "read")
    end if

@@ -525,8 +539,8 @@ subroutine read_faces_and_elements( fd, filename, gmshFormat, &
    read(fd,*) numAllElements

    ! Sanity check.
-    if(numAllElements<1) then
-      FLExit("Error: number of elements in GMSH file < 1")
+    if(numAllElements<0) then
+      FLExit("Error: number of elements in GMSH file < 0")
    end if

    allocate( allElements(numAllElements) )
@@ -592,13 +606,13 @@ subroutine read_faces_and_elements( fd, filename, gmshFormat, &

    ! Skip final newline
    if(gmshFormat==binaryFormat) then
-      read(fd) newlineChar
+      if (numAllElements>0) read(fd) newlineChar
       call ascii_formatting( fd, filename, "read" )
    end if

    ! Check for $EndElements tag
    read(fd,*) charBuf
-    if( trim(charBuf) .ne. "$EndElements" ) then
+    if( trim(adjustl(charBuf)) .ne. "$EndElements" ) then
       FLExit("Error: cannot find '$EndElements' in GMSH mesh file")
    end if

@@ -649,6 +663,9 @@ subroutine read_faces_and_elements( fd, filename, gmshFormat, &
    ! meshes are verboten:
    ! tet/hex, tet/quad, triangle/hex and triangle/quad

+    numElements = 0
+    numFaces = 0
+
    if (numTets>0) then
       numElements = numTets
       elementType = GMSH_TET
@@ -691,7 +708,7 @@ subroutine read_faces_and_elements( fd, filename, gmshFormat, &
       dim = 1

    else
-      FLExit("Unsupported mixture of face/element types")
+      if (numAllElements>0) FLExit("Unsupported mixture of face/element types")
    end if

    call copy_to_faces_and_elements( allElements, &
diff --git a/femtools/Write_GMSH.F90 b/femtools/Write_GMSH.F90
index 7c40156fc1..eb8142d5d6 100644
--- a/femtools/Write_GMSH.F90
+++ b/femtools/Write_GMSH.F90
@@ -206,7 +206,7 @@ subroutine write_gmsh_nodes( fd, lfilename, field, useBinaryGMSH )

    ! Sanity check.
    if (numNodes==0) then
-      FLAbort("write_gmsh_nodes(): no nodes to write out")
+      ewrite(-1,*) "write_gmsh_nodes(): no nodes to write out"
    end if

    ! header line: nodes, dim, no attributes, no boundary markers
@@ -231,9 +231,9 @@ subroutine write_gmsh_nodes( fd, lfilename, field, useBinaryGMSH )
       end if
    end do

-    if( useBinaryGMSH) then
+    if( useBinaryGMSH ) then
      !
Write newline character - write(fd) char(10) + if (numNodes>0) write(fd) char(10) call ascii_formatting(fd, lfilename, "write") end if @@ -278,7 +278,12 @@ subroutine write_gmsh_faces_and_elements( fd, lfilename, mesh, & ! Sanity check. if (numGMSHElems==0) then - FLAbort("write_gmsh_faces_and_elements(): none of either!") + ewrite(-1,*) "write_gmsh_faces_and_elements(): none of either!" + call ascii_formatting(fd, lfilename, "write") + write(fd, "(A)") "$Elements" + write(fd, "(A)") "0" + write(fd, "(A)") "$EndElements" + return end if @@ -421,7 +426,7 @@ subroutine write_gmsh_faces_and_elements( fd, lfilename, mesh, & deallocate(lnodelist) end do - if(useBinaryGMSH) then + if(useBinaryGMSH .and. numElements>0) then write(fd, err=301) newLineChar end if diff --git a/preprocessor/Initialise_Fields.F90 b/preprocessor/Initialise_Fields.F90 index e8b3c601cd..6a9ebd5d37 100644 --- a/preprocessor/Initialise_Fields.F90 +++ b/preprocessor/Initialise_Fields.F90 @@ -90,6 +90,9 @@ recursive subroutine initialise_scalar_field(field, path, position, time, phase_ real value integer nid + !! the do nothing case + if (node_count(position) == 0) return + ! Find out whether initial condition is constant or generated by a ! python function (or comes from something else). if(have_option(trim(path)//"/constant")) then @@ -236,6 +239,9 @@ subroutine initialise_vector_field(field, path, position, time, phase_path) real :: longitude, latitude, scalars(2), x, y logical :: spherical_earth + !! the do nothing case + if (node_count(position) == 0) return + ! Find out whether initial condition is constant or generated by a ! a python function @@ -395,6 +401,9 @@ subroutine initialise_tensor_field(field, path, position, time, phase_path) ! Temporary field for calculating isotropic tensor fields from python... type(scalar_field) :: sfield type(vector_field) :: vfield + + !! the do nothing case + if (node_count(position) == 0) return ! Find out whether tensor is isotropic or symmetric or not is_isotropic=have_option(trim(path)//"/isotropic") diff --git a/preprocessor/Populate_State.F90 b/preprocessor/Populate_State.F90 index ac76e627cb..083d2d4d9f 100644 --- a/preprocessor/Populate_State.F90 +++ b/preprocessor/Populate_State.F90 @@ -271,18 +271,14 @@ subroutine insert_external_mesh(states, save_vtk_cache) case ("triangle", "gmsh", "exodusii") ! Get mesh dimension if present call get_option(trim(mesh_path)//"/from_file/dimension", mdim, stat) + if(stat/=0) call get_option("/geometry/dimension", mdim) ! Read mesh - if(stat==0) then - position=read_mesh_files(trim(mesh_file_name), & - quad_degree=quad_degree, & - quad_family=quad_family, mdim=mdim, & - format=mesh_file_format) - else - position=read_mesh_files(trim(mesh_file_name), & - quad_degree=quad_degree, & - quad_family=quad_family, & - format=mesh_file_format) - end if + + position=read_mesh_files(trim(mesh_file_name), & + quad_degree=quad_degree, & + quad_family=quad_family, mdim=mdim, & + format=mesh_file_format) + ! After successfully reading in an ExodusII mesh, change the option ! mesh file format to "gmsh", as the write routines for ExodusII are currently ! not implemented. Thus, checkpoints etc are dumped as gmsh mesh files @@ -431,6 +427,9 @@ subroutine insert_external_mesh(states, save_vtk_cache) if (no_active_processes == 1) then call create_empty_halo(position) else + call reset_next_mpi_tag() + call split_communicator(MPI_COMM_FEMTOOLS, MPI_COMM_NONEMPTY, & + node_count(position)>0) call read_halos(mesh_file_name, position) end if ! 
Local element ordering needs to be consistent between processes, otherwise diff --git a/tests/empty_partitions/Makefile b/tests/empty_partitions/Makefile new file mode 100644 index 0000000000..f92dc95374 --- /dev/null +++ b/tests/empty_partitions/Makefile @@ -0,0 +1,26 @@ +FLMLMODEL = empty_partitions + +default: input + +input: +# gmsh -2 -bin square.geo + +clean: clean-mesh clean-run-debug +clean-mesh: +# rm -f *.ele *.edge *.node *.msh *.halo +clean-run: + rm -f $(FLMLMODEL)_?*.pvtu $(FLMLMODEL)_?*.vtu + rm -f $(FLMLMODEL).detectors $(FLMLMODEL).stat tmpf* + rm -f fluidity.err-?* fluidity.log-?* + rm -f matrixdump matrixdump.info + rm -f $(FLMLMODEL)_?*_checkpoint.flml + rm -f $(FLMLMODEL)_?*_checkpoint_?*.ele $(FLMLMODEL)_?*_checkpoint_?*.edge $(FLMLMODEL)_?*_checkpoint_?*.halo $(FLMLMODEL)_?*_checkpoint_?*.node + rm -f $(FLMLMODEL)_?*_?*_checkpoint.pvtu $(FLMLMODEL)_?*_?*_checkpoint.vtu + rm -f $(FLMLMODEL)_?*_checkpoint.ele $(FLMLMODEL)_?*_checkpoint.edge $(FLMLMODEL)_?*_checkpoint.node + rm -f $(FLMLMODEL)_?*_checkpoint.vtu + rm -rf $(FLMLMODEL)_?* +clean-run-debug: clean-run + rm -f adapted_mesh_?*.vtu bounding_box_?*_?*.vtu final_metric_?*.vtu gmon.out gradation_metric_?*.vtu interpolation_metric_final_?*.vtu interpolation_metric_hessian_?*_?*.vtu interpolation_metric_merge_?*_?*.vtu interpolation_metric_metric_?*_?*.vtu metric_input_?*_?*.vtu + rm -f adapted_mesh.edge adapted_mesh.ele adapted_mesh.node + rm -f adapted_mesh_?*.edge adapted_mesh_?*.ele adapted_mesh_?*.node + rm -f adapted_state_?*.pvtu adapted_state_?*.vtu diff --git a/tests/empty_partitions/empty_partitions.flml b/tests/empty_partitions/empty_partitions.flml new file mode 100644 index 0000000000..90df102469 --- /dev/null +++ b/tests/empty_partitions/empty_partitions.flml @@ -0,0 +1,256 @@ + + + + empty_partitions + + + fluids + + + + 2 + + + + + + + + + + + + + + + 0 + + + + discontinuous + + + + + + + + + 5 + + + + + + vtk + + + + 0 + + + + + + 2 + + + + + + + 0.0 + + + 1.0 + + + 4.0 + + + + + + + + 1.0 + + + + + + + + + + + + + only first timestep + + + + + + + + 1.0e-7 + + + 1000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1.0 + + + + + 0.0 + + + 1.0 + + + + + + + 1.0e-7 + + + 1000 + + + + + + + + + 0 0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + + 1000000 + + + + + + def val(X,t): + a = 1.0/max(1.0,0.5*t*t) + return [[a,0],[0,a]] + + + + + + + def val(X,t): + a = 1.0/max(1.0,0.5*t*t) + return [[a,0],[0,a]] + + + + + + diff --git a/tests/empty_partitions/empty_partitions.xml b/tests/empty_partitions/empty_partitions.xml new file mode 100644 index 0000000000..4e62150586 --- /dev/null +++ b/tests/empty_partitions/empty_partitions.xml @@ -0,0 +1,73 @@ + + + empty_partitions + + flml parallel + + make clean-run-debug; mpiexec fluidity -l empty_partitions.flml; mpiexec fluidity -l empty_partitions_2_checkpoint.flml + + + +import os +files = os.listdir("./") +solvers_converged = not "matrixdump" in files and not "matrixdump.info" in files + + +import fluidity_tools +s = fluidity_tools.stat_parser("empty_partitions.stat") +timesteps = len(s["ElapsedTime"]["value"]) + + +import fluidity_tools +s = fluidity_tools.stat_parser("empty_partitions_checkpoint.stat") +checkpoint_timesteps = len(s["ElapsedTime"]["value"]) + + +import vtk +reader=vtk.vtkXMLPUnstructuredGridReader() +reader.SetFileName("empty_partitions_1.pvtu") +reader.Update() +V = reader.GetOutput() +owners = 0 +o = [] +for i in 
range(V.GetNumberOfPoints()): + O = V.GetPointData().GetArray("NodeOwner").GetValue(i) + if O not in o: + o.append(O) + owners += 1 + + +import vtk +reader=vtk.vtkXMLPUnstructuredGridReader() +reader.SetFileName("empty_partitions_4.pvtu") +reader.Update() +V = reader.GetOutput() +final_owners = 0 +o = [] +for i in range(V.GetNumberOfPoints()): + O = V.GetPointData().GetArray("NodeOwner").GetValue(i) + if O not in o: + o.append(O) + final_owners += 1 + + + + + assert(solvers_converged) + + +assert(timesteps == 4) + + +assert(checkpoint_timesteps == 2) + + +assert(owners < 4) + + +assert(final_owners == 4) + + + + + diff --git a/tests/empty_partitions/square.geo b/tests/empty_partitions/square.geo new file mode 100644 index 0000000000..be1fdabb34 --- /dev/null +++ b/tests/empty_partitions/square.geo @@ -0,0 +1,23 @@ +N = 2; + +Point(1) ={0,0,0}; +Point(2) ={1,0,0}; +Point(3) ={1,1,0}; +Point(4) ={0,1,0}; + +Line(1) = {1,2}; +Line(2) = {2,3}; +Line(3) = {3,4}; +Line(4) = {4,1}; + + +Transfinite Line {1,2,3,4} = N; + +Line Loop(1) = {1,2,3,4}; +Plane Surface(1) = 1; + +Transfinite Surface {1}; + +Physical Surface(1) =1; +Physical Line(1) ={1,2,3,4}; + diff --git a/tests/empty_partitions/square_0.halo b/tests/empty_partitions/square_0.halo new file mode 100644 index 0000000000..9290690d18 --- /dev/null +++ b/tests/empty_partitions/square_0.halo @@ -0,0 +1,39 @@ + + + + + + + + + 1 + 2 + + + 1 + 3 + + + + + + + + + + + + + 1 + 2 + + + 1 + 3 + + + 1 + 4 + + + diff --git a/tests/empty_partitions/square_0.msh b/tests/empty_partitions/square_0.msh new file mode 100644 index 0000000000..53f7e757c4 Binary files /dev/null and b/tests/empty_partitions/square_0.msh differ diff --git a/tests/empty_partitions/square_1.halo b/tests/empty_partitions/square_1.halo new file mode 100644 index 0000000000..b59984f022 --- /dev/null +++ b/tests/empty_partitions/square_1.halo @@ -0,0 +1,39 @@ + + + + + 1 + 2 + + + + + + + 1 + 4 + + + 1 + 3 + + + + + 1 + 2 + + + + + + + 1 + 4 + + + 1 + 3 + + + diff --git a/tests/empty_partitions/square_1.msh b/tests/empty_partitions/square_1.msh new file mode 100644 index 0000000000..aeb4e1be16 Binary files /dev/null and b/tests/empty_partitions/square_1.msh differ diff --git a/tests/empty_partitions/square_2.halo b/tests/empty_partitions/square_2.halo new file mode 100644 index 0000000000..6d07a6be07 --- /dev/null +++ b/tests/empty_partitions/square_2.halo @@ -0,0 +1,39 @@ + + + + + 1 + 2 + + + 1 + 3 + + + + + + + 1 + 4 + + + + + 1 + 2 + + + 1 + 3 + + + + + + + 1 + 4 + + + diff --git a/tests/empty_partitions/square_2.msh b/tests/empty_partitions/square_2.msh new file mode 100644 index 0000000000..90756487d9 Binary files /dev/null and b/tests/empty_partitions/square_2.msh differ diff --git a/tests/empty_partitions/square_3.halo b/tests/empty_partitions/square_3.halo new file mode 100644 index 0000000000..1940533910 --- /dev/null +++ b/tests/empty_partitions/square_3.halo @@ -0,0 +1,39 @@ + + + + + + + + + 1 + 2 + + + 1 + 3 + + + + + + + + + 1 + 4 + + + 1 + 2 + + + 1 + 3 + + + + + + + diff --git a/tests/empty_partitions/square_3.msh b/tests/empty_partitions/square_3.msh new file mode 100644 index 0000000000..6e5c7e7861 Binary files /dev/null and b/tests/empty_partitions/square_3.msh differ
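Aside on the halo compression used in reconstruct_halo above: the pack intrinsic drops the per-process send/receive slots belonging to ranks excluded from the reduced communicator while preserving the order of the remaining slots. A minimal sketch of the idea with made-up counts; the mask nonempty == 1 stands in for the patch's nonempty == nonempty(getprocno(MPI_COMM_FEMTOOLS)):

    program pack_halo_counts
      implicit none
      ! One slot per process of the original communicator: 1 = still owns
      ! nodes after balancing, 0 = ended up empty (values are made up).
      integer, dimension(5) :: nonempty = (/ 1, 0, 1, 1, 0 /)
      integer, dimension(5) :: nsends   = (/ 12, 0, 7, 3, 0 /)
      integer, dimension(:), allocatable :: packed_sends

      ! Keep only the slots of processes present in the reduced
      ! communicator, preserving their relative order.
      packed_sends = pack(nsends, nonempty == 1)

      print *, packed_sends  ! prints: 12 7 3
    end program pack_halo_counts

Because pack preserves relative order and MPI_Comm_split keys on the world rank, slot j of the packed arrays corresponds to process j of the new communicator, which is why the set_halo_sends/set_halo_receives loop can simply skip empty ranks and increment j.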